├── .gitignore
├── README.md
├── distributions.py
├── envs.py
├── imgs
│   └── comparison.png
├── model.py
├── params.py
├── ppo.py
├── result.ipynb
├── storage.py
├── trainer.py
├── trainer_plus.py
└── utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .ipynb_checkpoints
2 | .vscode
3 | __pycache__
4 | log
5 | tf_log
6 |
7 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PPO - PyTorch
2 |
3 | ## This implementation is inspired by:
4 | 1. The OpenAI Baselines TensorFlow PPO2 implementation: https://github.com/openai/baselines/tree/master/baselines/ppo2
5 | 2. The pytorch-a2c-ppo-acktr repository: https://github.com/ikostrikov/pytorch-a2c-ppo-acktr
6 |
7 | To run training:
8 |
9 | ```bash
10 | python trainer_plus.py
11 | ```
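
Training expects PyTorch, `gym` with the Atari environments, and OpenAI `baselines` (for the vectorized envs, wrappers and logger). Hyperparameters live in `params.py` (`Breakout_Params`). Under the hood, `trainer_plus.py` wires the pieces together roughly as in the sketch below (a condensed version of its `test_breakout()`):

```python
from baselines import logger
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
import torch

from params import Breakout_Params
from envs import make_env, PyTorch_VecFrameStack
from ppo import PPO_Discrete
from trainer_plus import TrainerPlus

args = Breakout_Params()
logger.configure('./log', ['stdout', 'tensorboard'])
torch.manual_seed(args.seed)

# 8 parallel Atari workers, frame-stacked and transposed to channel-first for PyTorch
env = SubprocVecEnv([make_env(i, args.env_name) for i in range(args.num_processes)])
env = PyTorch_VecFrameStack(env, args.num_stack)

agent = PPO_Discrete(env, args)
TrainerPlus(env, agent, args).learn()
```

As in `trainer_plus.py`, keep this under an `if __name__ == "__main__":` guard so the subprocess workers start cleanly.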
12 |
13 |
14 | Comparison between the OpenAI implementation and this implementation on the Atari game `Breakout`:
15 |
16 |
17 | 
18 |
19 |
20 | ## Disclaimer
21 | The PyTorch implementation is much cleaner and runs a bit faster in terms of wall-clock time, yet still achieves comparable performance in the `Breakout` environment.
22 |
--------------------------------------------------------------------------------
/distributions.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 |
5 |
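# Categorical action head: a linear layer maps features to one logit per discrete action.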
6 | class Categorical(nn.Module):
7 | def __init__(self, num_inputs, num_outputs):
8 | super(Categorical, self).__init__()
9 | self.linear = nn.Linear(num_inputs, num_outputs)
10 |
11 |
12 | def forward(self, x):
13 | x = self.linear(x)
14 | return x
15 |
16 |
17 | def sample(self, x, deterministic):
18 | x = self(x)
19 | probs = F.softmax(x, dim=1)
20 |
21 | if deterministic is False:
22 | action = probs.multinomial(num_samples=1)
23 | else:
24 | action = probs.max(1, keepdim=True)[1]
25 |
26 | return action
27 |
28 |
29 | def logprobs_and_entropy(self, x, action):
30 | x = self(x)
31 | log_probs = F.log_softmax(x, dim=1)
32 | probs = F.softmax(x, dim=1)
33 |
34 | action_log_probs = log_probs.gather(1, action)
35 | dist_entropy = -(log_probs * probs).sum(-1).mean()
36 | return action_log_probs, dist_entropy
37 |
38 |
--------------------------------------------------------------------------------
/envs.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import gym
3 | from gym.spaces.box import Box
4 |
5 | from baselines import bench
6 | from baselines.common.atari_wrappers import make_atari, wrap_deepmind
7 | from baselines import logger
8 | from baselines.common.vec_env import VecEnv
9 | import numpy as np
10 | from gym import spaces
11 |
12 |
13 | class PyTorch_VecFrameStack(VecEnv):
14 | """
15 | Frame-stacking wrapper for a vectorized env; returns channel-first (NCHW) observations for PyTorch.
16 | """
17 | def __init__(self, venv, nstack):
18 | self.venv = venv
19 | self.nstack = nstack
20 | wos = venv.observation_space # wrapped ob space
21 | low = np.repeat(wos.low, self.nstack, axis=-1)
22 | high = np.repeat(wos.high, self.nstack, axis=-1)
23 | self.stackedobs = np.zeros((venv.num_envs,)+low.shape, low.dtype)
24 | self._observation_space = spaces.Box(low=low, high=high)
25 | self._action_space = venv.action_space
26 | def step(self, vac):
27 | """
28 | Apply sequence of actions to sequence of environments
29 | actions -> (observations, rewards, news)
30 | where 'news' is a boolean vector indicating whether each element is new.
31 | """
32 | obs, rews, news, infos = self.venv.step(vac)
33 | self.stackedobs = np.roll(self.stackedobs, shift=-1, axis=-1)
34 | for (i, new) in enumerate(news):
35 | if new:
36 | self.stackedobs[i] = 0
37 | self.stackedobs[..., -obs.shape[-1]:] = obs
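# NHWC -> NCHW: PyTorch conv layers expect channel-first observations.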
38 | out = np.transpose(self.stackedobs, (0, 3, 1, 2))
39 | # return self.stackedobs, rews, news, infos
40 | return out, rews, news, infos
41 | def reset(self):
42 | """
43 | Reset all environments
44 | """
45 | obs = self.venv.reset()
46 | self.stackedobs[...] = 0
47 | self.stackedobs[..., -obs.shape[-1]:] = obs
48 | out = np.transpose(self.stackedobs, (0, 3, 1, 2))
49 | # return self.stackedobs
50 | return out
51 | @property
52 | def action_space(self):
53 | return self._action_space
54 | @property
55 | def observation_space(self):
56 | return self._observation_space
57 | def close(self):
58 | self.venv.close()
59 | @property
60 | def num_envs(self):
61 | return self.venv.num_envs
62 |
63 |
64 | def make_env(rank, env_id):
65 | def env_fn():
66 | env = make_atari(env_id)
67 | env.seed(1 + rank)
68 | env = bench.Monitor(env, logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
69 | env = wrap_deepmind(env)
70 | return env
71 | return env_fn
72 |
73 |
74 | class ScaledFloatFrame(gym.ObservationWrapper):
75 | def _observation(self, observation):
76 | # careful! This undoes the memory optimization, use
77 | # with smaller replay buffers only.
78 | return np.array(observation).astype(np.float32) / 255.0
79 |
80 |
81 | class WrapPyTorch(gym.ObservationWrapper):
82 | def _observation(self, observation):
83 | return observation.transpose(2, 0, 1)
--------------------------------------------------------------------------------
/imgs/comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dai-dao/PPO-Pytorch/cc2af6ef80667feda235a0a7dcdd91b5e78fe30a/imgs/comparison.png
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from utils import orthogonal
5 | from distributions import Categorical
6 | import numpy as np
7 |
8 |
9 | def weight_init(m):
10 | classname = m.__class__.__name__
11 |
12 | if classname.find('Conv') != -1 or classname.find('Linear') != -1:
13 | orthogonal(m.weight.data)
14 |
15 | if m.bias is not None:
16 | m.bias.data.fill_(0)
17 |
18 |
19 | class FFPolicy(nn.Module):
20 | def __init__(self):
21 | super(FFPolicy, self).__init__()
22 | self.dist = None
23 |
24 |
25 | def forward(self, inputs, states, masks):
26 | raise NotImplementedError
27 |
28 |
29 | def act(self, inputs, deterministic=False):
30 | value, x = self(inputs)
31 | action = self.dist.sample(x, deterministic=deterministic)
32 | action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, action)
33 | return value, action, action_log_probs
34 |
35 |
36 | def evaluate_actions(self, inputs, actions):
37 | value, x = self(inputs)
38 | action_log_probs, dist_entropy = self.dist.logprobs_and_entropy(x, actions)
39 | return value, action_log_probs, dist_entropy
40 |
41 |
42 | class CNNPolicy(FFPolicy):
43 | def __init__(self, num_inputs, action_space):
44 | super(CNNPolicy, self).__init__()
45 |
46 | self.conv1 = nn.Conv2d(num_inputs, 32, 8, stride=4)
47 | self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
48 | self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
49 | self.linear1 = nn.Linear(32 * 7 * 7, 512)
50 | self.critic_linear = nn.Linear(512, 1)
51 | self.dist = Categorical(512, action_space.n)
52 |
53 | self.train()
54 | self.reset_parameters()
55 |
56 |
57 | @property
58 | def state_size(self):
59 | if hasattr(self, 'gru'):
60 | return 512
61 | else:
62 | return 1
63 |
64 |
65 | def reset_parameters(self):
self.apply(weight_init)  # assumption: the orthogonal weight_init defined above was meant to be applied here, as in the reference implementation
66 | relu_gain = nn.init.calculate_gain('relu')
67 | self.conv1.weight.data.mul_(relu_gain)
68 | self.conv2.weight.data.mul_(relu_gain)
69 | self.conv3.weight.data.mul_(relu_gain)
70 | self.linear1.weight.data.mul_(relu_gain)
71 |
72 |
73 | def forward(self, inputs):
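# Pixel values arrive in [0, 255]; scale to [0, 1] before the conv stack.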
74 | x = F.relu(self.conv1(inputs / 255.))
75 | x = F.relu(self.conv2(x))
76 | x = F.relu(self.conv3(x))
77 | x = x.view(-1, 32 * 7 * 7)
78 | x = F.relu(self.linear1(x))
79 |
80 | return self.critic_linear(x), x
81 |
82 |
--------------------------------------------------------------------------------
/params.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | class Params(object):
5 | def __init__(self):
6 | self.seed = 1
7 | self.env_name = 'PongNoFrameskip-v4'
8 | self.log_dir = './log'
9 |
10 | self.cuda = torch.cuda.is_available()
11 | self.num_processes = 8
12 | self.num_stack = 4
13 | self.lr = 2.5e-4
14 | self.eps = 1e-5
15 | self.num_steps = 128
16 | self.num_mini_batch = 4
17 | self.log_interval = 1
18 | self.clip_param = 0.1
19 | self.use_gae = True
20 | self.num_frames = 10e6
21 |
22 | self.gamma = 0.99
23 | self.tau = 0.95
24 | self.ppo_epochs = 4
25 | self.entropy_coef = 0.01
26 | self.max_grad_norm = 0.5
27 |
28 |
29 | class Breakout_Params(object):
30 | def __init__(self):
31 | self.log_interval = 1
32 | self.cuda = torch.cuda.is_available()
33 | self.seed = 1
34 | self.num_stack = 4
35 | self.lr = 2.5e-4
36 | self.eps = 1e-5
37 | self.env_name = 'BreakoutNoFrameskip-v4'
38 | self.log_dir = './log'
39 | self.num_processes = 8
40 | self.clip_param = 0.1
41 | self.entropy_coef = 0.01
42 | self.max_grad_norm = 0.5
43 | self.gamma = 0.99
44 | self.tau = 0.95
45 |
46 |
47 |
48 | self.value_coefficient = 0.5
49 | self.entropy_coefficient = 0.01
50 | self.gamma = 0.99
51 | self.lam = 0.95
52 |
53 | self.nsteps = 128
54 | self.nminibatches = 4
55 | self.num_update_epochs = 4
56 | self.lr_schedule = lambda x : x * 2.5e-4
57 | self.clip_range_schedule = lambda x : x * 0.1
58 | self.num_timesteps = int(10e6 * 1.1)
59 |
60 | self.epsilon_min = 0.1
61 | self.annealing_end = 1000000.
--------------------------------------------------------------------------------
/ppo.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch.autograd import Variable
5 |
6 | import numpy as np
7 | import time
8 | from model import *
9 | import torch.optim as optim
10 | from storage import RolloutStorage
12 |
13 |
14 |
15 | class PPO_Discrete(object):
16 | def __init__(self, env, args):
17 | self.env = env
18 | self.args = args
19 | self.obs_shape = self.env.observation_space.shape
20 | self.net = CNNPolicy(4, self.env.action_space)
21 | self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy']
22 |
23 | if self.args.cuda:
24 | self.net.cuda()
25 |
26 | self.optimizer = optim.Adam(self.net.parameters(), self.args.lr, eps=self.args.eps)
27 |
28 |
29 | def step(self, s):
30 | # s = np.transpose(s, (0, 3, 1, 2))
31 | # s = Variable(torch.from_numpy(s).type(self.T), volatile=True)
32 | s = Variable(s, volatile=True)
33 | value, action, action_log_prob = self.net.act(s)
34 | # cpu_actions = action.data.cpu().numpy().astype(np.int32).reshape((-1))
35 | # value = value.data.cpu().numpy().reshape((-1))
36 | # action_log_prob = action_log_prob.data.cpu().numpy().reshape((-1))
37 | # return value, cpu_actions, action_log_prob
38 | return value, action, action_log_prob
39 |
40 |
41 | def update(self, obs, returns, masks, actions, values, neglogpacs, lrnow, cliprange_now):
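# Note: despite the OpenAI-style name, 'neglogpacs' holds the (non-negated) action log-probs collected during the roll-out.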
42 | advantages = returns - values
43 | advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
44 |
45 | obs = Variable(obs)
46 | actions = Variable(actions).view(-1, 1)
47 | values = Variable(values).view(-1, 1)
48 | returns = Variable(returns).view(-1, 1)
49 | oldpi_prob = Variable(neglogpacs).view(-1, 1)
50 | advantages = Variable(advantages).view(-1, 1)
51 |
52 | vpred, action_log_probs, dist_entropy = self.net.evaluate_actions(obs, actions)
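# PPO clipped surrogate: ratio = exp(new_logp - old_logp), clipped to [1 - cliprange, 1 + cliprange].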
53 | ratio = torch.exp(action_log_probs - oldpi_prob)
54 | surr1 = ratio * advantages
55 | surr2 = torch.clamp(ratio, 1.0 - cliprange_now, 1.0 + cliprange_now) * advantages
56 | action_loss = -torch.min(surr1, surr2).mean()
57 |
58 | value_loss = (returns - vpred).pow(2).mean()
59 |
60 | for param_group in self.optimizer.param_groups:
61 | param_group['lr'] = lrnow
62 |
63 | self.optimizer.zero_grad()
64 | (value_loss + action_loss - dist_entropy * self.args.entropy_coef).backward()
65 | nn.utils.clip_grad_norm(self.net.parameters(), self.args.max_grad_norm)
66 | self.optimizer.step()
67 |
68 | return action_loss.data.cpu().numpy()[0], value_loss.data.cpu().numpy()[0], \
69 | dist_entropy.data.cpu().numpy()[0]
70 |
71 |
72 |
73 |
--------------------------------------------------------------------------------
/result.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "import pandas as pd\n",
12 | "import numpy as np\n",
13 | "import matplotlib.pyplot as plt\n",
14 | "import matplotlib.patches as mpatches"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 2,
20 | "metadata": {},
21 | "outputs": [
22 | {
23 | "data": {
143 | "text/plain": [
144 | " policy_entropy eplenmean eprewmean policy_loss value_loss fps \\\n",
145 | "0 1.38488 125.666667 NaN -0.001048 0.106331 242 \n",
146 | "1 1.38181 155.428571 0.714286 -0.001064 0.046059 561 \n",
147 | "2 1.38156 175.571429 1.214286 -0.000501 0.037005 564 \n",
148 | "3 1.38467 180.950000 1.300000 -0.000580 0.030860 568 \n",
149 | "4 1.38546 177.407407 1.259259 -0.000414 0.040690 566 \n",
150 | "\n",
151 | " total_timesteps time_elapsed approxkl serial_timesteps clipfrac \\\n",
152 | "0 1024 4.226129 0.001445 128 0.016846 \n",
153 | "1 2048 6.051087 0.000813 256 0.039795 \n",
154 | "2 3072 7.866562 0.000572 384 0.019531 \n",
155 | "3 4096 9.670195 0.000193 512 NaN \n",
156 | "4 5120 11.479911 0.000185 640 NaN \n",
157 | "\n",
158 | " nupdates explained_variance \n",
159 | "0 1 -0.004680 \n",
160 | "1 2 0.013004 \n",
161 | "2 3 -0.000792 \n",
162 | "3 4 0.031553 \n",
163 | "4 5 0.074548 "
164 | ]
165 | },
166 | "execution_count": 2,
167 | "metadata": {},
168 | "output_type": "execute_result"
169 | }
170 | ],
171 | "source": [
172 | "tf_result = pd.read_csv('tf_log/progress.csv')\n",
173 | "tf_result.head()"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": 3,
179 | "metadata": {},
180 | "outputs": [
181 | {
182 | "data": {
284 | "text/plain": [
285 | " Update_time policy_loss policy_entropy eprewmean eplenmean \\\n",
286 | "0 0.484468 -0.001897 1.38595 NaN 127.500000 \n",
287 | "1 0.383960 -0.002251 1.38156 0.888889 160.777778 \n",
288 | "2 0.362070 -0.000884 1.38155 1.230769 175.384615 \n",
289 | "3 0.361384 -0.000217 1.38075 1.250000 176.750000 \n",
290 | "4 0.361758 -0.000159 1.37747 1.480000 186.040000 \n",
291 | "\n",
292 | " serial_timestep value_loss Run_time num_updates total_timesteps \n",
293 | "0 128 0.036348 2.540672 1 1024 \n",
294 | "1 256 0.063263 1.304610 2 2048 \n",
295 | "2 384 0.075574 1.289811 3 3072 \n",
296 | "3 512 0.077710 1.343870 4 4096 \n",
297 | "4 640 0.063903 1.319346 5 5120 "
298 | ]
299 | },
300 | "execution_count": 3,
301 | "metadata": {},
302 | "output_type": "execute_result"
303 | }
304 | ],
305 | "source": [
306 | "torch_result = pd.read_csv('log/progress.csv')\n",
307 | "torch_result.head()"
308 | ]
309 | },
310 | {
311 | "cell_type": "code",
312 | "execution_count": 4,
313 | "metadata": {},
314 | "outputs": [
315 | {
316 | "name": "stdout",
317 | "output_type": "stream",
318 | "text": [
319 | "3414\n",
320 | "3414\n",
321 | "3414\n",
322 | "3414\n"
323 | ]
324 | }
325 | ],
326 | "source": [
327 | "tf_rew = tf_result['eprewmean'][1:]\n",
328 | "tf_num_updates = tf_result['nupdates'][1:]\n",
329 | "\n",
330 | "\n",
331 | "torch_rew = torch_result['eprewmean'][1:3415]\n",
332 | "torch_num_updates = torch_result['num_updates'][1:3415]\n",
333 | "\n",
334 | "print(len(tf_num_updates))\n",
335 | "print(len(tf_rew))\n",
336 | "\n",
337 | "print(len(torch_rew))\n",
338 | "print(len(torch_num_updates))"
339 | ]
340 | },
341 | {
342 | "cell_type": "code",
343 | "execution_count": 5,
344 | "metadata": {},
345 | "outputs": [
346 | {
347 | "data": {
348 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEKCAYAAAAfGVI8AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAIABJREFUeJzt3XeYFMXWwOHfYYkSJCmgSFZQ0gIr\nQYKiBAUDivEzoBguZlARFLyKiop4RVGuCUQUBBOGawSRDIogKMmABAXJObO7c74/amZnNveG2dmZ\nPe/zzNPdNd091czSZ7q76pSoKsYYY4quYpGugDHGmMiyQGCMMUWcBQJjjCniLBAYY0wRZ4HAGGOK\nOAsExhhTxFkgMMaYIs4CgTHGFHEWCIwxpogrHukKeFG1alWtU6dOpKthjDFRZcmSJTtU9YTs1ouK\nQFCnTh0WL14c6WoYY0xUEZENXtazW0PGGFPEWSAwxpgizgKBMcYUcVHxjCAjiYmJbNy4kSNHjkS6\nKqYIKF26NDVr1qREiRKRroox+S5qA8HGjRspX748derUQUQiXR0Tw1SVnTt3snHjRurWrRvp6hiT\n76L21tCRI0eoUqWKBQETdiJClSpV7OrTxKyoDQSABQFTYOxvzcSyqA4ExhhTqM2dC8uWRboW2Yqd\nQFC9Oojk36t69Ww/Mi4ujvj4eJo0acIVV1zBoUOHMlxv+fLlxMfHEx8fT+XKlalbty7x8fF06dIl\nz4d93XXX8cknn+R5P5lJSkqiYsWKYdu/MTGtUydo0QI2eOrXFTGxEwi2bi3w/ZUpU4Zly5axYsUK\nSpYsyauvvprhek2bNmXZsmUsW7aMiy++mJEjR7Js2TK+/fZbT1VJSkrKUdWNMYVMnTowaFCka5Gp\n2AkEEdaxY0fWrFnDv//9b1544YWU8iFDhvDiiy9mup3P5+O+++6jSZMmNG3alA8//BCAb7/9lnPO\nOYcLL7yQpk2bAjB+/HiaNWtG8+bNuemmm1L2MXPmTM466yzq1avHxx9/nOHnZLTtunXr6Ny5M82a\nNaNr165s3LgRgD///JM2bdrQtGlTHn300VT7eeaZZ2jdujXNmjXj8ccfz8W/lDFFSGhz42efhUce\niVxdsqKqhf7VqlUrTWvVqlWpCyD/X9koW7asqqomJibqxRdfrP/973913bp12qJFC1VVTU5O1nr1\n6umOHTtStunTp49+8MEHKctTpkzR888/X5OSknTz5s1as2ZN3bp1q06fPl3Lli2rGzZsUFXVZcuW\nacOGDXXnzp2qqinTa6+9Vq+++mr1+Xz6888/a8OGDdPVM7Ntzz//fJ04caKqqr722mvau3dvVVW9\n4IILdNKkSaqq+sILL+jxxx+vqqpffPGF3n777erz+TQ5OVm7d++u8+fPz/bfKVak+5szJjN79qju\n3atas6bqjTcGzyn166v6fAVWDWCxejjH2hVBHhw+fJj4+HgSEhKoVasWN998M3Xq1KFKlSosXbqU\nadOm0aJFC6pUqZLpPubNm8c111xDXFwc1atXp0OHDikJ9tq1a0etWrUA+O6777jqqquoXLkyQMoU\noFevXogIzZo1Y9OmTek+I7Ntf/jhB66++moAbrjhBubOnQvAwoULueqqqwC4/vrrU/Yzbdo0vvrq\nK1q0aEHLli1Zs2YNv//+e+7+8YyJZRUrwvHHw86dULp0sPzPP6FYMfjuO9i0yb3mz4ft2yNXV6K4\nQ1lhEHhGkNYtt9zCW2+9xZYtW+jbt2+u91+2bFlP65UqVSpl3v0IyLuMmkuqKkOHDuXmm2/Ol88w\nJuYdPgxlyqQvP++89GWB64aJE2H2bBg+HKpUgeLhP03bFUEYXHrppXz99df8+OOPdO/ePct1O3bs\nyJQpU/D5fGzdupX58+eTkJCQbr1zzz2X9957j127dgGkTL3IbNu2bdvy/vvvAzBx4kQ6deoEuCuR\nQPmkSZNS9tO9e3fGjRvHwYMHAde7e8eOHZ7rYUyRdMkl3tZ74gl3tXDDDTBunGu5eOut4a2bX+wE\ngmrVCs3+SpYsSefOnbnyyiuJi4vLct3LL7+cRo0a0axZM7p06cLzzz/PiSeemG695s2b8+CDD9Kp\nUyfi4+MZOHCg5/pktu2YMWN4/fXXadasGe+99x6jRo0CYPTo0YwaNYpmzZqxNaT1VI8ePbj88stp\n27YtTZs25corr+TAgQOe62FMzDtyBJ5/PnVZkyYQ+EG4eDFccEHG2/773+nL5s/P3/plQvLrVkI4\nJSQkaNqBaVavXs3pp58eoRplzefz0bJlSz744ANOPfXUSFfH5JPC/DdnCoFff4WM/j6Sk2HPHnjn\nHbjnHtdP6b33oEEDdyuoY0cXQDISFwd5aD4uIktUNf0thjRi54qgkFi1ahUNGjTgvPPOsyBgTFEy\neXLG5cWKQeXKcO+9LggAXHUVtGoFCQlw6BCceaYr9zfeSJGcDIsWha/OfvawOJ+dccYZrF27NtLV\nMMZkZtcu9wu9Xr383e+ff6Yv++WX7LcTgc2b3Xy5csHy5cthwQIXMMLMrgiMMUVL9epQv75rkbN3\nb/7sc+tWmDTJ7TNAFfydQbMVaD4aaJFXsqR7tnDbbe72UJhZIDDGFB0HDkBiopvftQsefDB/9jtk\niJv26uWm5cvnbPtPPoF+/dytoksvhS+/zJ96eWSBwBhTdKxenXr59dfzZ7+BpHLPPuumgwfnbPvz\nz4dXXnF9BqZOzbifQRjZMwJjTNERaO582mkQ6BV/8CB47LyZqc2b3dVA5cru4W9GncgKsZi5Iijo\nLNQ7d+5MSS1dvXp1Tj755JTlY8eOFcxB+9133300btyYwYMHM3To0FRJ7/LD1KlTadq0aUp/h//9\n73/5uv9QixcvRkRSZWYNRypsS69dRB096qYTJsBxx7n5sWNdJ6683CbasgVq1HDzURYEIMxXBCIy\nALgFUGA5cBNQA5gCVAGWANerap7PnAWdhbpKlSop6SUee+wxypUrxwMPPJC/lchGUlIScXFxvPnm\nm+zatYtixYoxdOjQfP2Mn376iUGDBvHtt99Su3Zt/vzzT7p27Uq9evVo3Lhxvn4WwOTJk+nQoQOT\nJ0/Ol/EajEkl0F6/VClYvx5OPBH69w++f8opcNddwWaeXhw75nIKBQJBFArbFYGInAzcAySoahMg\nDrgaGAGMUtUGwG4g5hLXTJgwgdatWxMfH88dd9yBz+dL+QU6ePBgmjdvTrt27di2bRsAU6ZMoUmT\nJjRv3pzOnTsDLqFdnz59aNq0KS1btmTOnDkAjB07ll69etG5c2e6d+9Oz5492b9/Py1btkxJYR3w\n008/0aZNG5o1a0bv3r3Zu3cvmzdvpnXr1gAsWbIEEeGff/4BoF69eunG5R05ciSPPPIItWvXBqB+\n/foMGjSI5557DoAOHTrQv39/4
uPjadq0aUrCvAMHDnDjjTfSunVrWrRokXIVMXbsWC6//HK6d+/O\nqaeeykMPPZTyWT6fj48++ogJEybw1VdfebqysvTaJkf8iRWpUAFOOMHdmw91zz3g/3vxZPVqaNjQ\nzUdxIAhb6mjgZOBvoDLuyuNzoDuwAyjuX6cd8E12+/KShjoCWahTPProozpy5EhVVV2+fLlecskl\nmpiYqKqqt956q06aNEkTExMV0C+//FJVVQcMGKBPP/20qqo2atRIt2zZoqqqu3fvVlXVZ555Rm+9\n9VZVVV2xYoXWqlVLjx49qm+88YbWqlVLd+3apaouBXYgTbSq6pAhQ3TUqFGqqnr66afrvHnzVFX1\noYce0vvvv19VVRs2bKgHDhzQUaNGaUJCgk6ZMkXXrFmjHTp0SHdsTZs21RUrVqQqW7x4sZ555pmq\nqtq+fXvt16+fqqrOmDFDmzdvrqqqAwcO1MmTJ6uq6q5du/TUU0/Vw4cP6xtvvKENGjTQvXv36qFD\nh7RmzZq6adMmVVWdNWuWduvWTVVVr7jiCv3kk08yPMaAgk6vbWmoo9y+fcH/3MnJrmzvXtXPP1f9\n5pvgeytXet/nG28Et/vf/8JT7zwg0mmoVXUT8BzwF7AZ2Iu7FbRHVQN9pjf6A0Y6InKbiCwWkcXb\nI5yiNSe+/fZbfvzxRxISEoiPj2f27Nn86e9oUqZMGS7w5xlp1aoV69evB6B9+/bccMMNjB07Fp/P\nB7j01Ndddx0AjRs35qSTTmLNmjUAdOvWjUqVKmVZj507d3LkyBHat28PQJ8+fVKuKtq1a8eCBQuY\nO3cuDz/8MHPmzGHu3Ll07NgxV8d8zTXXAC653bZt2zhw4ADTpk1j+PDhxMfH07lzZ44cOcJff/0F\nQJcuXahQoQJlypShUaNGKeWTJ09OSYt99dVXMzmznpp+ll7b5Mjo0W564YWuty+4K4OePaFbN/ji\nC1fWs6d7hvDrr1nvT9X1HQho1iz/61xAwvaMQEQqAZcAdYE9wAfA+VluFEJVXwdeB5drKBx1DAdV\npW/fvjzxxBOpypOSkihZsmTKclxcXMoQlG+88QY//PADn3/+OS1btmTp0qVZfobX9NSZ6dSpE3Pm\nzGHTpk1cdNFFjBw5kqNHj9K7d+90655xxhksWbIk1fOAtMtpU1aLCKrKJ598Qv369VO9N2fOnFRp\nswP/DomJiUydOpUvvviCYcOG4fP52LNnDwcPHky1fl5Zeu0iLJBXLbPnaG3buun69XDjjW48gT17\nMt/fG2/ArFlufto08I8dEo3C2WqoC7BOVberaiIwFWgPVBSRQACqCaQfSSWKdenShffffz8lPfPO\nnTtTfvFmZu3atbRt25YnnniCSpUqsWnTJjp27JiSAnr16tVs3ryZBg0aeK5HlSpVKFOmDAsWLADg\nnXfe4eyzzwZc6usJEybQqFEjihcvTvny5Zk+fXrK1UOoBx54gCeffDLlGNauXcuIESO4//77U9Z5\n7733AJg1axbVqlWjbNmydO/enZdeeillneyC2/Tp0znzzDP5+++/Wb9+PX/99RcXXXQRn376aabb\nWHptkystW2ZcXrly6gyge/eC/wo9Q4E+CV27ulcUC2erob+AtiJyHHAYOA9YDMwELse1HOoDZP4/\nPQeqVcvflkO5zUIdeBDZpUsXfD4fJUqU4NVXX+Wkk07KdJsBAwawbt06VJVu3brRpEkT6tevz7/+\n9S+aNm1KiRIlePvtt1NdUXjxzjvvcPvtt3P48GEaNGjA+PHjAWjQoAFJSUkpJ8j27duzfft2KlSo\nkG4fCQkJDB8+nB49epCcnEyJEiX4z3/+Q5MmTVLWKVGiBPHx8SQnJ6d8xqOPPkr//v1p2rQpPp+P\nBg0aZHlSnzx5Mpdeemmqst69ezN+/HiuvPLKDLcJTa9dvHhxWrVqxbhx4xgzZgx9+/bl6aefplq1\nail1Gj16NNdeey1PPfUUF198ccp+evTowa+//kpb/y/C8uXL8+6771K1atVs/41NFAn0KM5qoJdh\nwyC0scCRI8FmpmnNm+eCyrRp+VfHSPHyICG3L2AY8CuwAngHKAXUAxYBa3C3i0pltx9PYxabiGjf\nvr0uXbo00tUoEPY3F8VGjVJNSFAtUSL7dRMTVYcNcw+A9+zJeB2fT7V8edW7787feuYzPD4sDms/\nAlV9FHg0TfFaoHU4P9cYY1Ls3w8DBrj5zH7dhypeHPbtc/OvvgqDBqV+3+dzA83s3w916uRrVSPF\nUkyYPJk3b16kq2BM1rZsCc4fOuRtm7//dtMxY2DFCjcuwLvvurKdOyHQ8z3kFmk0i+oUExoFo6uZ\n2GB/a1EskOsfXEsgLwKjgv39txtMfvLk4IPj3bvd9KGHov4hcUDUBoLSpUuzc+dO+w9qwk5V2blz\nJ6VLl450VUxOqQZTQ3/wQbBncXYyGh5y/363v0DLog4dcpaKohCL2ltDNWvWZOPGjURTZzMTvUqX\nLk3NmjUjXQ2TU+vWBX/BX3gheA3md90Fn32WumzXLpex1N9cmhhqVRa1gaBEiRLUrVs30tUwxhRm\ngdxZjzziPQiAu+WzfLl7WFy7tstMumQJLFzo3j/hBGjRIv/rGyFRe2vIGGOydfiwmyYk5HzbJk3g\n5ZehRw+3vHIlPP+8m//ySyhRIn/qWAhYIDDGxK7A4PF5eb4TGHbyscfctGrV3AWWQswCgTEmdgWa\nfJ55Zu73kbbvwUUX5X5fhZQFAmNM7AqMaZFNtt4spR1xzAKBMcZEkYMHg/f4cyttIEiTEysWWCAw\nxsSu/BiYvljsnyZj/wiNMUXXwYNQrlyka1HoWSAwxsSuAwfyfkUALj11yZKp8xbFEAsExpjYlJzs\nehUHmn/mxb//7a4ucjtQSSFngcAYE5tGjHDTE0/Mn/1lNaBNlLNAYIyJTUOGuGkGY3Gb1CwQGGNi\n2ymnRLoGufLdd64D82+/hf+zYvdaxxhTtJ18Mpx/fqRrkWM+n3stW+by3BVEklO7IjDGxJ59+2DT\nprzlGIqQxo3dM+mdOyEuDipXDv9n2hWBMSa2/POPuxoAaNo0snXJAVVYuxZ+/dUtf/wxVKhQMGPf\n2BWBMSa2vPhicP622yJXjxz6+GNo0CC4vHq195E18yrTKwIRuS+rDVX1+fyvjjHG5FFg1MJVq6Jm\nKMm9ezNu3JQfXSC8yOqKoLz/lQDcDpzsf/UDWoa/asYY44HP5waMuekm2LMH5s2D0093ryjx2mvB\n+dDxblq1KpjPz/SKQFWHAYjIHKClqu73Lz8GfFEgtTPGmOzMmwf33+/mTzsN/vjDDS8ZJZKSYNAg\nNz9kCDz5JNSpAxs2wOOPF0wdvDwsrgYcC1k+5i8zxpjIC4wcBi4IQFSNGTBunJv26eOCAMCKFXD0\nKFSpUjB18BII3gYWicjH/uVewFthq5ExxuTE338HnwWMH++mHTpErj45tHy5m77xRrCsXL
mCTZqa\nbashVR0O3ATs9r9uUtWnw10xY4zJ1vr1sGYNDB0azDLavTtcdVVEq5UTb7/tRtIMfTZQ0LK8IhCR\nOGClqjYCfiqYKhljjAfJyXDuuW6+Wzf3xPXAAahXL7L1yiGfDypWjGwdsrwiUNVk4DcRqVVA9THG\nGG8aN4Z169x8hw7B7KDVq0euTrmgCs2aRbYOXp4RVAJWisgi4GCgUFUvDlutjDEmK6rBbGyBq4Id\nO9y0oBrf55OkpMjeFgJvgeCRsNfCGGNy4quvgvOnneam117rHhanHWy+kEtMjPxQB9l+vKrOLoiK\nGGOMJwcPQs+ebv6cc+A//3Hz994Lc+dCr14Rq1pOJSe7i5tIXxFk22pIRNqKyI8ickBEjolIsojs\nK4jKGWNMOqGppXv3huOOc/PNm7t+BFH0jODoUTct9IEAeBm4BvgDKAPcAowJZ6WMMSZT8+YF5zt3\njlw98sGkSW4aaPkaKZ6yj6rqGiBOVZNVdTwQfaM9GGOi3/r1bvrww/Dzz67lUBQLNHrq1y+y9fAS\nCA6JSElgmYg8KyIDPG5njDF5s3GjS8CzerVbDnQUO/fcyLe5zAfPP++GTihZMrL18HJCv96/3l24\n5qOnAJ5GgxaRiiLyoYj8KiKrRaSdiFQWkeki8od/Win31TfGxLR334WnnnL5hH75BRYtcuVRlEIi\nM4mJ7hlB8+aRrom3QNAAEFXdp6rDVPU+/60iL14Evvb3TG4OrAYGAzNU9VRghn/ZGGOcwYPhvvvc\nSC1Tpriy998PnjEnTYJSpXK0y0WL4JNP4KGHXFqit9/O5zrnwnvvuelll0W2HuBO8FmvIDIBaAfs\nAuYCc4B5qro7m+2OB5YB9TTkQ0TkN+AcVd0sIjWAWaraMKt9JSQk6OLFi70cjzGmMNm/37Xrz0lD\n+ewGk9m4MTgUpQeqUCzNT95WrSDSp5RatVy+vF27oFKY7ouIyBJVTchuPS9J5/qo6mnAZcDfuBZD\n2z3Uoa5/vfEislRExopIWaCaqm72r7OFTFJai8htIrJYRBZv3+7l44wxhcqWLW7Q3R490r/37bfw\n5ZfpyxMT05cNGJB6OYdnzT590pctWeJSPUfSsWNw1lnhCwI54aUfwXUi8hrwIdAF15y0o4d9F8eN\nZPaKqrbAPV9IdRvIf6WQ4SWJqr6uqgmqmnDCCSd4+DhjTKEyc6abTp8ebPK5fTscPgxduwY7hfmp\nQvI/W93CvffC1q388dlqDj35PLz5prtd1KxZjnoOHzsG77zj5h9+GOrWhfbt3XKkx7U/csRlHS0M\nvFyvvQD8CbwKzFTV9R73vRHYqKo/+Jc/xAWCrSJSI+TW0LYc1tkYEw2WLAnOX3MN/Pgj1KiR6eo9\ne8IfXx3mD4CEBA4cdyKnXXwiAEeO3OT5scA338CNN7rWpYFf/bfeCsOHu9fBgy7XfySTlB4+7MYp\nLizZMLzcGqoK9AVKA8NFZJGIvONhuy3A3yISuP9/HrAK+AwIXKz1AT7NTcWNMYXYzJku9UNH/82D\njRvh1FOz3OSrr+A4DgEw9cvSqRoG7c7yiWRql13m7kpdcokbmgCCI1mC67x16aWwdq33fea3QHeI\n44+PXB1Cebk1VAGoBdQG6gDHAz6P+78bmCQivwDxwFPAM0BXEfkDd6vpmZxX2xhTaPz4o/s5f+BA\nsKxbNze96qrgUJKh7wf4z/CBHraX4gZCfGNyWX7+Obja4cPeqvL113DIxRK+/95l9qxXL5iXLuBE\nd6FBUpK3/ea3n/yjuxTU4PTZUtUsX8AvwH+B/wNqZrd+OF6tWrVSY0whdfLJqqB6/fWqHTuqJiaq\n1q+vWry4m1dV7dvXrQOqV12letppKcvTp6uCT4tzTFeUb6MKWorD2qJFcJO5c71VpV+/4DaB1yef\npF9vxAj33oED+ffPkJnly1Wfe0718GHV5GRXFqjb7t3h/WxgsXo4x3o+GQPHeV03v18WCIwpxKpW\nTX/2BdULLgiuc/Cg6nffqU6cqOrzqS5Zkmrd9dRKmT8cd5yWL6967Jjq99+74gYN3GZZSU5268bH\nu/mJE1WPHMl43RdfdOvu3Jl//wyZCf0nuewy1bffDi6H/7O9BQIvt4baicgq4Ff/cnMR+W+4rlCM\nMVEmOTnj8gcfDM4fd5xLEHftta6fQMuWcNNNKW/X5q+U+dJntWLfPpeRs3VrV7ZmDUyYkHU1AklJ\nzzzT9Ru49trM+50Fyl9/Pet95tWPP6ZenjoVbrjBzQfunhUGXnoWvwB0B3YCqOrPQKdwVsoYE0Wq\nVk1flpzsxgrIyptvckON6SmL85rfAeedByNHppSJwMCBbn7u3Mx3peq6JgC8+GL2VQ6chLMLLnkV\nCGRPPpn+vcKUONVr9tG/0xRl8hPAGFOkrF7txgAAGDPGPRDety99V95MfJXYhbt4ia2cyGddX3Zn\n8zZtUq3z7LNw9tlZdwDbts0Fg5EjvTXJrFsX7r4bfv3VtecPl2r+7rL33Qe//x4s37wZBg0K3+fm\nlJdv628ROQtQESkhIg/gcgYZY4qyX3+FM85w83fc4V5ly+ZozOA9e2AMd1GdrRQvkXlqidNPdznn\nBg8OtgoKFUhOmpNeunXrumnoHaz8pApbt7pbVmXKuNazx465oZWrV88+k0ZB8hII+gF3AicDm3DN\nQO8MZ6WMMYXcL7/A448Hl/v3z/EukpLcK3DxsGxZ5utWqeJ+uY8YkXGcmTPHTc86y/vn33WXm770\nkuuElt/+8j/2CL3AKVHCHUth46VD2Q5VvVZVq6nqiap6HRDGiyljTKGWnOwygU6e7HIJjRuXbWex\njAT6BgwcCBdc4E7ImbnoouC8L4NeTP/846Zp+wtkpUQJGDrUzZ9/vvulnl+++grq1HHzGaVaKmyy\nDAQicrKIJPgHpkFEThSRp3DDVhpjiqJZs4Lzw4dD37652k0gEJxyiss/V79+5uum7XilaTKUHTzo\nOo7FxeWsDo8/DpUru/ncpqZeudK1PgoEKFV44gk3f9ZZhWO8gexkGghEpD8ujfRLwPcicgvu2UAZ\noLD0hzPGFLTAU89Vq4L3V3Lgt9/c7aDvvnPLXh7uFi/uWg0FWhAdOxZ8LykJPvwwR48mUojApk1u\n/ssv3X7TBpnsNGkC//oXPPdcMOX1woVu7Jz583M8dEJEZHVFcBvQUFXbAb1wWUe7qeoADaaRNsYU\nNZMnu7Nuo0a52nzOHHfCfOght+w18VqHDsFWOKGBYO5c9/wgt8MXly7tktTNmOFO2sWKuQZQXmzc\nGJwfNCh1Y6nnn89dfSIhq0BwRFV3AajqX8Bvqroki/WNMbFM1T2tnTvXDQyTy2Yv5cq5aSDxWk4y\ncAZ+XQcGfQc3fDHA6NG5qg4AvXqlXr7rLti5M+ttNm2C667L+L1p0wpPimkvsgoENUVkdOAF1Eiz\nbIwpSgYOdO03wV0V5ILPl7qFTpMmOWvpExhDo
Hnz9ENO5qU1To8e8NprEDr0yYcfZr3NbbfB7Nlu\nPrTD2LRpbriFaJJVIBgILAl5pV02xhQln3/upkuWQHx8rnYxcGCwN++CBa4VaiATqBdnn516+bPP\n3PSBB3JVnRQlSrgT+zffQO/eruz999Ovd/iw+6y4uOAAawkJ7gri3HPd8JfRFgQgi4FpVDXMna+N\nMYVa4MknuDNgqVIuyX/LlrneZWhq6XbtcrePf/6Bk05y8x995KZZtTjKiRYt3JXAaae5zm5pjRjh\nhlkI+PZblxUD3DOGaOWtH7gxpugJHbmlUiX3871ahkOMe7JkiTtZtm2bcV8Ar2rUSN+yJ7N79bnV\no4frOB1az6lTYdiw1OsFgkC0s0BgjElv/35o0CC4HEjIc999ud7lypVueued+ZNeITAo/UsvBR9A\n55fTT3epLL7/PlgWuGUE7momq9xH0cbLmMXGmKLmssuC89WquaQ5vXtDw4aZb5ONQCzJr6ybb73l\nXuHQpYubXncdLF+eulnoX3+5TnCxxMt4BKeJyAwRWeFfbiYiQ8NfNWNMxCxd6qbbtgVzNufhRvyq\nVa7TFbh2+4Vd/fpQs6ZrpnrZZe5BMri+AbEWBABEs+lGJyKzcS2GXlPVFv6yFarapADqB0BCQoIu\nXry4oD7OmKItMRFKlnTNY0aOdLmFPvnENbbPaQ4H4IMP4Morg8v79+f/rZxwOHAgdW/lYsVcL+bC\nlDU0OyKyRFUTslvPyzOC41QupK8SAAAgAElEQVR1UZqyCA35bIwJu+n+wWIqVnTTuDh3WygXQeDI\nkdRB4KqroiMIgKtnaFLVSy6JriCQE14CwQ4RqQ8ogIhcDliKCWNi0eHD0LOnm0+b6S0X7r7bTTt1\nci19pkzJ8y4LVGhHsauuilw9ws3Lw+I7gdeBRiKyCVgH5HNjLWNMoRA6HmRuG/qHGDvWTWfOzPOu\nIqJsWRfAfD7Pg65FpWwDgaquBbqISFmgmKruD3+1jDER0b27m+7aBccfn6ddBcYO7t49+k+i0V7/\n7GQaCEQkwwbD4r9JpqpRlFvPGJMjORnzMQPbtwfvrwdSR5vCK6srgsDz8obAmYA/qwcXAWkfHhtj\nYsFJJ7nhwnLpiy/gwgtTlyVk22bFRFpWuYaGAYjIHKBl4JaQiDwGfFEgtTPGFKxjx/I0kkrgdlDA\nnj15vsNkCoCXO1/VgJBhIDjmLzPGxIqJE10OiB073HBguRTaa3jhQgsC0cLLN/42sEhEPgYEuAR4\nK5yVMsYUgORkl77z2DG4/vpgeVLuuwkF+qeuXw+1a+eteqbgeGk1NFxEvgI64voS3KSqS8NeM2NM\n7syf7/IoV62adQ+oVq1S54X2SxryaK6TkG329zCqVSuXOzAR4bVRVDLgC3kZYwqjuXPd4L4nnggD\nBmS8TlKSSyNx9GiGb5c4+cRsh2nMiCq8/LKbj9UeuLHKS9K5e4FJQFXgRGCiiNwd7ooZY3IhdOzG\nF1+EP/9M/f7337vhuEqWhN9+C5bfey+rX51NM9wVwnPP5fyjA2mhoyWFhAnyknTuF6Cdqh70L5cF\nFqpqswKoH2BJ54zJ1p49rt3m/Pnp30tKCuYJ+te/4PXXg+/dcw/06oW2aUvFGmXYt88Vn3IKbNjg\n/Zf9li1uwBiAjRvd2PYm8rwmnfNyK1Bwt4YCkv1lxphI+/hjdwaeOjUYBLp2hcceg/bt3XLbtm5o\nrUAOIYA2bdzD4ueegxIlaNWSlCAA8Pffrjftpk3BYSEzowrnn+/mR42yIBCNvASC8cAPaVoNjQtr\nrYwx2TtwIPUAMgH9+sFZZ7mhJuvVcyOqhwaBJ5+EIUNSFg8fDg4/MHq0G6YxMDjZ/ffD5MlZV+Pt\nt4PPnC+/PA/HYyIm22cE/lQSNwG7gJ24VkMvhLtixpgs9O2bOlk+wEMPuZ/ngeBQty68+27qdUaO\nTBUEvv4ajjvOzU+c6LKF1q/vRuECly1UxD1uyGyc4Q0b3PSTT9xgLib6eHlYXB9YqaqjgeVARxGp\nGPaaGWPSS0qC//0Pxo93y2ed5V4dOsAjj6Rfv00bOPdcN0SYKsfueYDXXnN3hXbuTJ1NomXL4Hza\nUbj693cjVi5YkP4jjhxxz58vuSTvh2ciw8utoY+ABBFpALyKyzn0LtDDyweISBywGNikqheKSF1g\nClAFWAJcr6rHstqHMcbv2mvh/ffd/PjxcOON7qe6SMZPduvVgxkzUhbPOMM1JKpaFb76KrjarFlu\nwPZQ+/a5NNKB8ep37HCPHY4cSZ2FYuVKKFMmX47ORIiXfgQ+VU0CLgNeVtWBQI0cfMa9wOqQ5RHA\nKFVtAOwGbs7BvowpunbsCAYBCLbXLFbMU/OeYcOCrUlvvhnG+Z/0HTgAZ5+dfv3y5V1XhFmz3NDF\ngwe78hEjgj2If/kFPvss+FzaRCcvgSBRRK4BbgA+95eV8LJzEakJ9ATG+pcFOBf40L/KBKBXTips\nTKF1883uJnlmN9PzQjV4th07NngVkAOPPRac37vXTW+5xQ2+kpWzz4YTTnBDGAM8+qiLPd9/D82b\np9+3iT5eAsFNQDtguKqu89/aecfj/l8AHiTYG7kKsMd/hQGwEciwsZmI3CYii0Vk8fbt2z1+nDER\nsn8/vPmma2/Zvz98913q9w8ezFMOHzp3ht9/d/NXXJFtENi+3d3S2bEjWJY27cNHH8Ebb3ivQpUq\nrgFSQOgAZq1be9+PKXy8tBpapar3qOpk//I6VR2R3XYiciGwTVWX5KZiqvq6qiaoasIJJ5yQm10Y\nE36ffuqeor73XrDspZfgvPPcz+Vp09z9k3Ll3BPVW25x6R1yavZsN/3gA6hQIctVN21yGSZGjQre\nzlm/3rUEuuEG16ds9uyMW55mp1UrmDAh59uZQk5VM3wB7/uny4FfQl7LgV8y2y5k+6dxv/jXA1uA\nQ7hUFTuA4v512gHfZLevVq1aqTGFzoABqu6mTfDVqVP6srSvcuVS78fnU925M/3+k5JUf/tNNSHB\nbTd0qKdqhX5UtWqu7L333PKsWXk8ZlVdty64/xUrVDdsyPs+TXgAizWb86u6rzLTE3kN/7R2Ri8v\nOw/Z1znA5/75D4Cr/fOvAndkt70FAlPo/Pe/GZ/kjx5V/eEH1e+/V7388qwDwooVqgcPBoPHypVu\n3xMnuiAzcmTq9X/+Ocsq/fmnaokSwdUbN3bT0Hi1e3f+HL7P5+KUKdy8BoKsRijb7J9uEJHqQGtc\nGuofVXVLHi5CBgFTRORJYCnWS9lEo7Fj3fSDD1xvq3nzXBK3kiWDN8zffBOaNHHtLfv2hYoV4ZVX\n3NNWcGX168OcOW65cWM3VvDu3ek/78ILoVnm6b1U3SDxgbtOL70EHTtCfLy7RQTuAW/FfOoBJBJM\nX2Sin5ekc7cA/wa+w6WYOBt4XFXfDH/1HEs6ZwoVVWjY0J2YP/wwmNLZS9rN9evhzjvhyy/djfxt\n27Je
f8GC1E9lM3D0KKxY4cYGfvhhGD7clft8qU/WBw8GexGbosFr0jkvrYYGAi1U9UZV7QO0wv2q\nN6boWbHC/bT+4w/3Cx7cQ2CvuZfr1HEjvA8YkDoIrFzppm3auCDx66+eggC4PmaBAeJDB44vVgym\nT3ctWnfutCBgMuelZ/FOYH/I8n5/mTFFT6CbLWQ+8IsXVaoE53/6yXX5zebqPDMffRScb9s29Xtd\nurhMosZkxUsgWIPLPvop7hnBJcAvInIfpCSlMyb2HT7sfmKXL+/y/xfzOsBfBk480U3vvBNatMhT\ntVq2dLHkwgttZDCTO14CwZ/+V8Cn/mn5DNY1Jrbs3eturp90UjDf//335y0IgEsPUb8+nHNOrncx\neLBL9wDQrZvr0mBMbngZvH5Y2jIRKa7B3sHGxJajR93JftMml1sZXIugZ55x87175/0zSpZ0WUFz\naerUYBAA1+M3r7HJFF2Z/umIyLyQ+bQpJRaFrUbGRNqgQTBmTDAIgDv5B7J4BkZtiZA33wzGosAz\n6scfj1x9TPTL6oogNBVVkzTv2Z1IE/1UXWrN995zeZmfeCI4CktaK1a46fDhULp0gVYzrZtD8vXu\n35/5esZ4lVUg0EzmM1o2puAEEuLn9smozwdXX+06g4Xq2NHdFgJ3D79WLfds4NNP3VBep5/uhvDK\npXXrYNcul68nNw4cgEmTgsvffJPrqhiTSlaBoKKIXIq7fVRRRAIpqgQ4Puw1MyYjjRq5+/UDBsCW\nLa4lz9SpOQsKc+emDwIQHIEdXPL+2rXdfL9+easzblzfwPABXluJ7tmTuifwAw/Aa6+5+XffdQ+I\njckPmfYsFpHxWW2oqjeFpUYZsJ7FBnBNNzM6+02aBP/3f972sX69S+0wcyY8+6wbqb1+fdcra+rU\n4Hq5bNOfmdA4lXbX777r4tr777uLkmLFXKxr1AhOOw1OPtlVN1Q+V8/EKK89iz0njovky5LOGU1M\nDGZO698/fQK3Tp1UixVz88WLqx44kPF+AuvHxakmJwfLV6xw5aefrrpjR75Wfdmy1FVt3lx140b3\n3oMPpj+Uhx9WPe20jPPUNWnikssZ4wV5TTpnTKGRnBxM5DZ0qHuoO3y4a4J5/fUwZUowcRu4AWBG\nj4aHHkq9n127gvMzZqRub9m4cdh+Zv/vf27aubP7Zf/zzy7tQ2aeeio4P2QIFC/u0hkdPAi33eaG\nITYmP1kgMIXf+PGwdKmbv/12Nw0kzpk82Y2T2LSpG2ll0SL3VPbhh6FnT5d/oXZtl7Pnhx/cNiNG\nZDxIbz5LSnLZP1eudLd8mjRJf4sH3Ng155zjTvb16sHWra78s8/goovCXk1jLBCYQi4pCW691c0v\nWeJa8aTVsCEcO+bmVd0Auzt3BgfUTauAzq6DBgVzyT3+uEsndMEFLiAMGOAO5dRToWtXt06JErB6\ntat6hLsqmCLGUyAQkbOAOqHrq+rbYaqTMUGB2zudOrmkOtkRgQ0bMs8GumSJawYaZjNmwPP+LFxb\ntrjRLMEFAnDZqzNSqZJ7GVOQsg0E/l7F9YFlQLK/WAELBCZ8tm1zg7TPmePG6E07GHxWypZ191u6\ndXO3g844wyWKO+88b8EkD1591cWZO+5wy59/HgwCxhRWXq4IEoAz/E+gjQmfxER3f2TfvuDZs29f\n9/Q0p8Nhde2a+uHv3r35V89MLFgQfIQBrvtBz55h/1hj8sxLmqoVQPVwV8QUYatWucbyJUvCsmVQ\no0bwvbFj8+cndYUK7hUGe/bAn39C+/apywOPNowp7LxcEVQFVonIIuBooFBVLw5brUzRkJTkMno+\n8kiwbMkSOHTIzR84EBUJ9ps1Cw7+cvnlbuwa1bDfhTIm33gJBI+FuxKmiJo4MXUQALjrLjedMMHd\n648CoSOA3XOPp9EljSlUvIxHMLsgKmKKoHn+TOe33eba9V97rUsoB6nz/hRyFSu60cHatLEgYKJT\nts8IRKStiPwoIgdE5JiIJIvIvoKonIlh8+fDuHEu189rr7nEOgG33BIcyrEQ+usvd/IXgYULXcLS\n6tXdxUxx65ljopCXP9uXgauBD3AtiG4ATgtnpUyM++cf1wsY4Omn3bRhQ9e8s21bl0KiEBs9OtgI\n6ayz3LRUqcjVx5i88vT7RVXXiEicqiYD40VkKfBQdtsZk05iomshBC7tZrNmbr5sWfj228jVy4MF\nC9K3DAqoX79g62JMfvISCA6JSElgmYg8C2zGW7NTY9J76y037dcPrrkmolXJiaQkePLJ4PJdd7nn\n3K+84noO33hjxKpmTJ5lOh5BygoitYGtQElgAG5Qmv+q6prwV8+x8QhixJEjLg9Q2bJuXIAID/mY\nE+edF+zc/OWXwVQRxhRmXscj8NJqaIOIlAFqqOqwfKmdKZquv971DXj11UIdBBYudAlN27Rxv/qn\nTw8Ggd27U48aZkws8JJr6CLgOdwVQV0RiQcetw5lJkdWrQpmWiuEuZWTktxdq4YNXX47cOmKQp9b\nT5pkQcDEJq8dyloDswBUdZmI1A1jnUys2b8/OGL7tGlhS/WQFwMGwMsvZ/7+RRd5Hw3TmGjj5aFv\noqqmzdhlCeiMd2PGuOcDl1wSTL5fiGzcGAwCJUrATTe5vgHffBNcZ+TIyNTNmILgJRCsFJH/A+JE\n5FQReQlYEOZ6mViyZYu7Cvjkk0jXJJWFC93IYaec4pZnzXLj27z5pst/160bHD7sOpA1bBjRqhoT\nVl4Cwd1AY1zCucnAPqB/OCtlYszRo4Wux9WuXa4zWCDLBWTcR6B06WCgMCZWeWk1dAgY4n8Zk3PH\njhW6QBAYsvj886FFCzdmsKWHMEVVpn/6IvJZVhtaqyHj2dGj7l5LmOzYAb/+Cq1be/uYxYthxQo3\n/+WXUZHp2piwyuo3UDvgb9ztoB8A++9icmfPHjjuuLDs+q233MPdgGPH3APfrDzzjJv+5z8WBIyB\nrJ8RVAceBpoALwJdgR2qOttSUxvPVF2m0TZt8n3XL76YOggA3HBD1tscPAgffQTHH+8GkDHGZBEI\nVDVZVb9W1T5AW2ANMEtE7vKyYxE5RURmisgqEVkpIvf6yyuLyHQR+cM/rZQvR2IKp6lT3RVBQra9\n3D375RcYNAj6+5ssbNsGPp+bnzLFZbD44IP0Oez27IFy5dz8gw/mW3WMiXpZPh4TkVJAT+AaoA4w\nGvjY476TgPtV9ScRKQ8sEZHpwI3ADFV9RkQGA4OBQbmrvilQgdzLBw7ASSd5u68ybZqbXnpprj82\nKck16dy82fXs7R/SZm30aJe+CFxz0Hbt3EiXV17pyi64wHVjqFs3dSx6+OFcV8eY2KOqGb6At4Gf\ngCeBJpmt5/UFfIq7vfQbLm8RQA3gt+y2bdWqlZoI2rtXtVcvVXejx71q1VJ9/33VuXMz327JErdu\n6dK5/ujx41N/bNpXYmL6bfr1S7/eG28E548cyXV1jIkqwGL1c
n7O9A3wAfv9r30hr/3APi87D9lX\nHeAvoAKwJ6RcQpcze1kgiKC1a7M+E4PqU0+l3y45Ofj+o4/m+GPXrVMdMiS4i/h41VdecfMDB6oe\nPpz19lu3uthVrlzqqi5YkOOqGBO1vAaCbNNQ55WIlANmA8NVdaqI7FHViiHv71bVdM8JROQ24DaA\nWrVqtdqwYUNY62nS2L/fpYSYOTNYNmWK64nVtSvMmOHaavbt697bti14j2bbNujeHZYtc206f/jB\n88fOmgW//w7PPQd//OHKJk+Gq6/O/aGMG+dGvzzrLPfc2piiwmsa6jzd7snuBZQAvgHuCymzW0OF\n2f79qps2qZ5/fvBndLNmma//4YfB9WrUUF28WLVjx2DZsWOePzr09g2oVq2qOn9+3g/J53NXArt2\n5X1fxkQTPF4RhG2kMRERYBywWlWfD3nrM6CPf74P7tmBiaSDB2H4cChf3r1OPhm+/tq1xVyzBpYs\nyXzb3r2hj//r3LzZPZGdO9ftZ/Pm7Bv1+/l8cOutweXTTnNj1wTGBM4LEfcQuZK1TzMmQ+EccrI9\ncD1wrogs8796AM8AXUXkD6CLf9mE24IF8PrrboSVsmXd2fGJJ1zWtXLlYOhQ1xoo4IIL3D2V+vWz\nz70wbpzr3hsYhhLc7aDq1T1X7/HH3XTwYHc98NtvrprGmPAL+zOC/GBDVebRfffBqFHZr3fxxW5M\nxrvvzn2XW58Pinn7faEKycluDPvARcXevYVyuAJjolK+DVVpotzMmemDwNChcO+9rjH9SSdBmTLQ\noUPG6TdzykMQSEx0Y9e/+Wbq8s8/tyBgTCRYIIhlqvCvf7n51atdK58jR+CMM1zZ669HpFo33eSG\nfQz10UfQs2dEqmNMkWeBIJY99ZRrgzlsGDRqlG+7TUyEDRugQYOcbZecDJddBp995oZ9HDYs5/sw\nxuS/cD4sNpH0+efuFhDA/ffneXeJie5Wjoi7sDj1VNeF4NAhd+GRmVdecduIuGfOn/mTm48da0HA\nmMLCAkGsCqTlXLgwz81vunZ1u7j55tTl48e78mLFXBK4UKquIdEdd6Tf3/Tp7rGEMaZwsEAQTQ4d\ngvh49/M6dGT1UMeOQefO7iw8YAC0bZtuFVV46CE3RMC118Lbb7tMnQsXwpAhrmNwv35w7rnuo779\n1l0RgBt2WBV27069z2efhZUr3VWCiAsOgY7Gd9yROidFly75+G9ijMkzaz4aTXr1gk/T9L/74gvo\n0SO4/MADbsQVcB3F/APCHDzoug388INL45ATXbq4jBENGrgqhPr0U9i3L+NxAEqWdC1Rn37ac78y\nY0w+KhQpJvLrVeRTTKxfnyr3woabHtVjFE9ZvuzsHTpwoOr+rQc1Oa64Hi1TQTt1SNalS1X/+Ud1\n2DDVChVSp28A1bvvVn388fTloQlGJ0xwKRqy07y522b8eNVt28L+L2KM8YDCknQuPxT5K4KuXVNG\nWbn95M94ddNFHM8eltKCuqwHoCNzeJF7aclSevI5X5K+LWbr1rBoEVxxBbz6KlSu7MqTkyEuzs37\nfC4MBJZzQtWGfjSmMLEOZVFqxw6XEycuDndWPvdcmD2bHbcMpvrYJ0jeVNw/xktF3m62lmvfv5gG\nqz9nLp3c9lRhVZ2eDLzC9SVLTIR161z2h169Mj5Rh570PXYKzpAFAWOikwWCQmL2W+u474lKbF+7\nj7+pxUkVD7F2XxVK+Y4AcPrY+0imOM8/754BOwL//tT1EfDnbK66fgnrakfmGIwx0ckCQaSNHQu3\n3srZQKocn3uCs3Ek4SOORx4JDQJ+xYq5BP4rV0LVqlCtWvjrbIyJKdZ8NBJU8bU7y91LCc29nMbr\nlQYhKP93XRw+XzBDZ4YaN7YgYIzJFbsiKEBHj8JXXyr1bupEs70LU8rfog/V33+J86suhqZNoUoV\nOHaM20qVckO0GWNMGFkgKAjJyewbNY7+jx7PkEMPU5+1HKAs7cqtYNyMOtzYOrBi5+A2pUpFoqbG\nmCLIAkGYqLqOW0uXQp1HbuSyQxMJzbr8+YRd/HJ9SWtpY4yJOHtGkM927nTjuxQrBn3O/Yuy9/+L\nyw5NDK4wdCj8+SdX32BBwBhTONgVQT5K/nkFp7ZrwIjDd/MZY1PK98V3ouTMbyhdsXQEa2eMMRmz\nQJBfRo8m7t572ZW2/JprqDBxYt56ahljTBhZIMgHvnHjKXbvvakL583Ln6EfjTEmzCwQ5FVSEnvv\nGUoloDqb+WHaPmo3rQDVq0e6ZsYY44kFgrxISmJ1h1s5/dA/3MkYNhypTqlSFgCMMdHFblznwdY7\nhnH6D28xhau499fbrem/MSYqWSDIKZ8PgOkf7KHEG2MAOGPRBE5raG1BjTHRyQKBV88/73IDxcWB\nCF2vrERldrP06a9pdqZdChhjopcFAi+mTYP7709XvLtmE1oM7h6BChljTP6xh8Ue6PXXI0BvPkRQ\n1lGX/ac0ZvU66yBmjIl+FgiyM2YMsm0bs+nEVHoD0KYNTH8vd8M5GmNMYWOBICurV8NddwFwBR+w\nbBk0a2ZDMhpjYosFgqzcfTcA/RnFwGdPpHnzCNfHGGPCwAJBRlTdaO8zZvA9bZjRpD+L7op0pYwx\nJjys1VCogwfR2+9wCeL69gWgH6+yYAGUKRPhuhljTJjYFUHA0aPo8ccjyckpRd34hoemxFO+fATr\nZYwxYVYkA4Fq8IGvz+cuAHwPDKRYcjIbqEVbvufq/jV46lpISIhsXY0xJtyKVCDYvh1OPDF9+btl\nb+Gag+P4nJ58fefnzL8P6tUr+PoZY0wkxHQg8B04xP4RY5iY/H889loNduxyj0QEHzfyFmexgD5M\noMTBJADm3fUeo1+0MWSMMUVLRAKBiJwPvAjEAWNV9ZlwfM7i616g9adDuJMHuYITOEA59h5fm3j9\nCdm3DwA9+WSm176FaWcO4ennSlgQMMYUOQUeCEQkDhgDdAU2Aj+KyGequiq/P6vkorkA/NzlPuoU\n+5sT5n+J7F0HNWvCbbfBI48gFSrQ1V8ZY4wpiiJxRdAaWKOqawFEZApwCZDvgWDxFc/y/c4h9JvY\nIVh45AiUKmXdg40xxi8SgeBk4O+Q5Y1Am3B80C0vNk1fWNoSxRljTKhCe0dcRG4TkcUisnj79u2R\nro4xxsSsSASCTcApIcs1/WWpqOrrqpqgqgknnHBCgVXOGGOKmkgEgh+BU0WkroiUBK4GPotAPYwx\nxhCBZwSqmiQidwHf4JqPvqmqKwu6HsYYY5yI9CNQ1S+BLyPx2cYYY1IrtA+LjTHGFAwLBMYYU8RZ\nIDDGmCJOVDXSdciWiGwHNuRy86rAjnysTjSwY459Re14wY45N2qrarbt76MiEOSFiCxW1SI1qoAd\nc+wrascLdszhZLeGjDGmiLNAYIwxRVxRCASvR7oCEWDHHPuK2vGCHXPYxPwzAmOMMVkrClcExhhj\nshCzgUBEzheR30Rk
jYgMjnR98pOIrBeR5SKyTEQW+8sqi8h0EfnDP63kLxcRGe3/d/hFRFpGtvbe\niMibIrJNRFaElOX4GEWkj3/9P0SkTySOxatMjvkxEdnk/66XiUiPkPce8h/zbyLSPaQ8av72ReQU\nEZkpIqtEZKWI3Osvj8nvOovjjez3rKox98Ils/sTqAeUBH4Gzoh0vfLx+NYDVdOUPQsM9s8PBkb4\n53sAXwECtAV+iHT9PR5jJ6AlsCK3xwhUBtb6p5X885UifWw5PObHgAcyWPcM/991KaCu/+89Ltr+\n9oEaQEv/fHngd/+xxeR3ncXxRvR7jtUrgpThMFX1GBAYDjOWXQJM8M9PAHqFlL+tzvdARRGpEYkK\n5oSqzgF2pSnO6TF2B6ar6i5V3Q1MB84Pf+1zJ5NjzswlwBRVPaqq64A1uL/7qPrbV9XNqvqTf34/\nsBo3imFMftdZHG9mCuR7jtVAkNFwmFn9Y0cbBaaJyBIRuc1fVk1VN/vntwDV/POx9G+R02OMlWO/\ny38b5M3ALRJi8JhFpA7QAviBIvBdpzleiOD3HKuBINZ1UNWWwAXAnSLSKfRNddeUMd0crCgco98r\nQH0gHtgM/Cey1QkPESkHfAT0V9V9oe/F4nedwfFG9HuO1UDgaTjMaKWqm/zTbcDHuMvErYFbPv7p\nNv/qsfRvkdNjjPpjV9Wtqpqsqj7gDdx3DTF0zCJSAndSnKSqU/3FMftdZ3S8kf6eYzUQxOxwmCJS\nVkTKB+aBbsAK3PEFWkr0AT71z38G3OBvbdEW2BtyyR1tcnqM3wDdRKSS/1K7m78saqR5nnMp7rsG\nd8xXi0gpEakLnAosIsr+9kVEgHHAalV9PuStmPyuMzveiH/PkX6KHq4XrnXB77gn60MiXZ98PK56\nuBYCPwMrA8cGVAFmAH8A3wKV/eUCjPH/OywHEiJ9DB6PczLuEjkRd//z5twcI9AX94BtDXBTpI8r\nF8f8jv+YfvH/R68Rsv4Q/zH/BlwQUh41f/tAB9xtn1+AZf5Xj1j9rrM43oh+z9az2BhjirhYvTVk\njDHGIwsExhhTxFkgMMaYIs4CgTHGFHEWCIwxpoizQGAiTkRURP4TsvyAiDyWT/t+S0Quz499ZfM5\nV4jIahGZGYZ9H8jm/Yoickd+f64pOiwQmMLgKHCZiFSNdEVCiUjxHKx+M3CrqnYOV32yUBGwQGBy\nzQKBKQyScEPyDUj7Rtpf9IFfxyJyjojMFpFPRWStiDwjIteKyCJxYzXUD9lNFxFZLCK/i8iF/u3j\nRGSkiPzoT/T1r5D9zk/MYOkAAANNSURBVBWRz4BVGdTnGv/+V4jICH/Zv3EdhcaJyMg0658jIp+H\nLL8sIjf659eLyLP+/S0SkQb+8roistBf/mTItuVEZIaI/OR/L5Bt8hmgvrg89iP96w4MObZh/rKy\nIvKFiPzsr/9Vnr4dE/Ny8ovHmHAaA/wiIs/mYJvmwOm41M1rgbGq2lrcYB93A/3969XB5W6pD8z0\nn3BvwKUnOFNESgHzRWSaf/2WQBN1aX9TiMhJwAigFbAblwG2l6o+LiLn4vLJL87hce9V1aYicgPw\nAnAh8CLwiqq+LSJ3hqx7BLhUVff5r56+9weswf76xvvr2Q2XiqA1rifuZ+ISE54A/KOqPf3rHZ/D\nupoYZVcEplBQl4HxbeCeHGz2o7r87kdx3ewDJ/LluJN/wPuq6lPVP3ABoxEuF80NIrIMlwa4Cu7k\nCbAobRDwOxOYparbVTUJmIQbTCYvJodM2/nn24eUvxOyrgBPicgvuLQLJxNMzxyqm/+1FPgJd7yn\n4v5duorICBHpqKp781h3EyPsisAUJi/gTlzjQ8qS8P9gEZFiuNGYAo6GzPtCln2k/ttOm0dFcSfV\nu1U1VWIyETkHOJi76mcopf5+pTOoS3bzAdfiftW3UtVEEVmfwf7AHdvTqvpaujfc0I49gCdFZIaq\nPp79IZhYZ1cEptBQ1V3A+7gHrwHrcbdiAC4GSuRi11eISDH/c4N6uORd3wC3i0sJjIicJi6ba1YW\nAWeLSFURiQOuAWZns80G4Ax/9siKwHlp3r8qZLrQPz8fl00S3Mk/4Hhgmz8IdAZq+8v344Y9DPgG\n6Csu5z0icrKInOi/tXVIVScCI3G3wIyxKwJT6PwHuCtk+Q3gUxH5Gfia3P1a/wt3Eq8A9FPVIyIy\nFnf76Cd/auDtBIdDzJCqbhY3SPhM3K/uL1T102y2+VtE3selFV6Hu10TqpL/Vs9RXGABuBd4V0QG\nEUy/DO5W1P9EZDmwGPjV/xk7RWS+uEHvv1LVgSJyOrDQHRoHgOuABsBIEfHhMpzenlXdTdFh2UeN\niRD/rZ0EVd0R6bqYos1uDRljTBFnVwTGGFPE2RWBMcYUcRYIjDGmiLNAYIwxRZwFAmOMKeIsEBhj\nTBFngcAYY4q4/wfEvofgYnpNMgAAAABJRU5ErkJggg==\n",
349 | "text/plain": [
350 | ""
351 | ]
352 | },
353 | "metadata": {},
354 | "output_type": "display_data"
355 | }
356 | ],
357 | "source": [
358 | "plt.plot(tf_num_updates[:2500], tf_rew[:2500], c='b')\n",
359 | "plt.plot(torch_num_updates[:2500], torch_rew[:2500], c='r')\n",
360 | "plt.ylabel('Mean Episode Reward')\n",
361 | "plt.xlabel('Number of updates')\n",
362 | "red_patch = mpatches.Patch(color='red', label='PyTorch code')\n",
363 | "blue_patch = mpatches.Patch(color='blue', label='Tensorflow OpenAI code')\n",
364 | "plt.legend(handles=[red_patch, blue_patch])\n",
365 | "plt.savefig('imgs/comparison.png')\n",
366 | "plt.show()"
367 | ]
368 | },
369 | {
370 | "cell_type": "code",
371 | "execution_count": null,
372 | "metadata": {
373 | "collapsed": true
374 | },
375 | "outputs": [],
376 | "source": []
377 | },
378 | {
379 | "cell_type": "code",
380 | "execution_count": null,
381 | "metadata": {
382 | "collapsed": true
383 | },
384 | "outputs": [],
385 | "source": []
386 | }
387 | ],
388 | "metadata": {
389 | "kernelspec": {
390 | "display_name": "rl",
391 | "language": "python",
392 | "name": "rl"
393 | },
394 | "language_info": {
395 | "codemirror_mode": {
396 | "name": "ipython",
397 | "version": 3
398 | },
399 | "file_extension": ".py",
400 | "mimetype": "text/x-python",
401 | "name": "python",
402 | "nbconvert_exporter": "python",
403 | "pygments_lexer": "ipython3",
404 | "version": "3.6.3"
405 | }
406 | },
407 | "nbformat": 4,
408 | "nbformat_minor": 2
409 | }
410 |
--------------------------------------------------------------------------------
/storage.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
3 |
4 |
5 | class RolloutStorage(object):
6 | def __init__(self, num_steps, num_processes, obs_shape, action_space, state_size):
7 | self.observations = torch.zeros(num_steps + 1, num_processes, *obs_shape)
8 | self.states = torch.zeros(num_steps + 1, num_processes, state_size)
9 | self.rewards = torch.zeros(num_steps, num_processes, 1)
10 | self.value_preds = torch.zeros(num_steps + 1, num_processes, 1)
11 | self.returns = torch.zeros(num_steps + 1, num_processes, 1)
12 | self.action_log_probs = torch.zeros(num_steps, num_processes, 1)
13 | if action_space.__class__.__name__ == 'Discrete':
14 | action_shape = 1
15 | else:
16 | action_shape = action_space.shape[0]
17 | self.actions = torch.zeros(num_steps, num_processes, action_shape)
18 | if action_space.__class__.__name__ == 'Discrete':
19 | self.actions = self.actions.long()
20 | self.masks = torch.ones(num_steps + 1, num_processes, 1)
21 |
22 | def cuda(self):
23 | self.observations = self.observations.cuda()
24 | self.states = self.states.cuda()
25 | self.rewards = self.rewards.cuda()
26 | self.value_preds = self.value_preds.cuda()
27 | self.returns = self.returns.cuda()
28 | self.action_log_probs = self.action_log_probs.cuda()
29 | self.actions = self.actions.cuda()
30 | self.masks = self.masks.cuda()
31 |
32 | def insert(self, step, current_obs, state, action, action_log_prob, value_pred, reward, mask):
33 | self.observations[step + 1].copy_(current_obs)
34 | self.states[step + 1].copy_(state)
35 | self.actions[step].copy_(action)
36 | self.action_log_probs[step].copy_(action_log_prob)
37 | self.value_preds[step].copy_(value_pred)
38 | self.rewards[step].copy_(reward)
39 | self.masks[step + 1].copy_(mask)
40 |
41 | def after_update(self):
42 | self.observations[0].copy_(self.observations[-1])
43 | self.states[0].copy_(self.states[-1])
44 | self.masks[0].copy_(self.masks[-1])
45 |
46 | def compute_returns(self, next_value, use_gae, gamma, tau):
47 | if use_gae:
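# GAE: delta_t = r_t + gamma * V(s_{t+1}) * mask - V(s_t); A_t = delta_t + gamma * tau * mask * A_{t+1}; return_t = A_t + V(s_t).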
48 | self.value_preds[-1] = next_value
49 | gae = 0
50 | for step in reversed(range(self.rewards.size(0))):
51 | delta = self.rewards[step] + gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
52 | gae = delta + gamma * tau * self.masks[step + 1] * gae
53 | self.returns[step] = gae + self.value_preds[step]
54 | else:
55 | self.returns[-1] = next_value
56 | for step in reversed(range(self.rewards.size(0))):
57 | self.returns[step] = self.returns[step + 1] * \
58 | gamma * self.masks[step + 1] + self.rewards[step]
59 |
60 |
61 | def feed_forward_generator(self, advantages, num_mini_batch):
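# Flatten the (num_steps, num_processes) roll-out and yield shuffled mini-batches for the PPO update epochs.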
62 | num_steps, num_processes = self.rewards.size()[0:2]
63 | batch_size = num_processes * num_steps
64 | mini_batch_size = batch_size // num_mini_batch
65 | sampler = BatchSampler(SubsetRandomSampler(range(batch_size)), mini_batch_size, drop_last=False)
66 | for indices in sampler:
67 | indices = torch.LongTensor(indices)
68 |
69 | if advantages.is_cuda:
70 | indices = indices.cuda()
71 |
72 | observations_batch = self.observations[:-1].view(-1,
73 | *self.observations.size()[2:])[indices]
74 | states_batch = self.states[:-1].view(-1, self.states.size(-1))[indices]
75 | actions_batch = self.actions.view(-1, self.actions.size(-1))[indices]
76 | return_batch = self.returns[:-1].view(-1, 1)[indices]
77 | masks_batch = self.masks[:-1].view(-1, 1)[indices]
78 | old_action_log_probs_batch = self.action_log_probs.view(-1, 1)[indices]
79 | adv_targ = advantages.view(-1, 1)[indices]
80 |
81 | yield observations_batch, states_batch, actions_batch, \
82 | return_batch, masks_batch, old_action_log_probs_batch, adv_targ
--------------------------------------------------------------------------------
/trainer.py:
--------------------------------------------------------------------------------
1 | import gym
2 | import numpy as np
3 | import time
4 | from params import Breakout_Params, Params
5 | from envs import *
6 | from ppo import *
7 | from collections import deque
8 | import os.path as osp
9 | from utils import *
10 |
11 | from baselines.common.atari_wrappers import make_atari, wrap_deepmind
12 | from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
13 | from baselines.common.vec_env.vec_frame_stack import VecFrameStack
14 | from baselines import bench, logger
15 |
16 | import torch
17 |
18 |
19 | class Trainer(object):
20 | def __init__(self, env, agent, args):
21 | self.args = args
22 | self.env = env
23 | self.agent = agent
24 |
25 | self.nenv = env.num_envs
26 | self.obs = np.zeros((self.nenv,) + env.observation_space.shape)
27 | self.obs = np.transpose(self.obs, (0, 3, 1, 2))
28 | self.obs[:] = env.reset() # PyTorch_VecFrameStack.reset() already returns channel-first (NCHW) observations
29 | self.dones = [False for _ in range(self.nenv)]
30 |
31 |
32 | def run(self):
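# Collect one roll-out of nsteps transitions from every parallel env, then compute GAE advantages and returns.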
33 | mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_logpacs = [],[],[],[],[],[]
34 | epinfos = []
35 |
36 | for _ in range(self.args.nsteps): # 1 roll-out
37 | values, actions, logpacs = self.agent.step(self.obs)
38 |
39 | mb_obs.append(self.obs.copy())
40 | mb_actions.append(actions)
41 | mb_values.append(values)
42 | mb_dones.append(self.dones)
43 | mb_logpacs.append(logpacs)
44 | self.obs[:], rewards, self.dones, infos = self.env.step(actions)
45 | for info in infos:
46 | maybeepinfo = info.get('episode')
47 | if maybeepinfo: epinfos.append(maybeepinfo)
48 | mb_rewards.append(rewards)
49 |
50 | mb_obs = np.asarray(mb_obs)
51 | mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
52 | mb_actions = np.asarray(mb_actions)
53 | mb_values = np.asarray(mb_values, dtype=np.float32)
54 | mb_logpacs = np.array(mb_logpacs, dtype=np.float32)
55 | mb_dones = np.asarray(mb_dones, dtype=bool)
56 |
57 | last_value, _, _ = self.agent.step(self.obs)
58 |
59 | # discount / bootstrap off value
60 | mb_returns = np.zeros_like(mb_rewards)
61 | mb_advs = np.zeros_like(mb_rewards)
62 | lastgaelam = 0
63 |
64 | print('rewards shape', mb_rewards.shape)
65 |
66 | for t in reversed(range(self.args.nsteps)):
67 | if t == self.args.nsteps - 1:
68 | nextnonterminal = 1.0 - self.dones
69 | nextvalues = last_value
70 | else:
71 | nextnonterminal = 1.0 - mb_dones[t+1]
72 | nextvalues = mb_values[t+1]
73 | delta = mb_rewards[t] + self.args.gamma * nextvalues * nextnonterminal - mb_values[t]
74 | mb_advs[t] = lastgaelam = delta + self.args.gamma * self.args.lam * nextnonterminal * lastgaelam
75 | mb_returns = mb_advs + mb_values
76 |
77 | return (*map(flatten_env_vec, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_logpacs)), epinfos)
78 |
79 |
80 | def learn(self):
81 | # Number of samples in one roll-out
82 | nbatch = self.nenv * self.args.nsteps
83 | nbatch_train = nbatch // self.args.nminibatches
84 |
85 | # Total number of steps to run simulation
86 | total_timesteps = self.args.num_timesteps
87 | # Number of times to run optimization
88 | nupdates = int(total_timesteps // nbatch)
89 |
90 | epinfobuf = deque(maxlen=100)
91 |
92 | for update in range(1, nupdates+1):
93 | assert nbatch % self.args.nminibatches == 0
94 |
95 | # Adaptive clip-range and learning-rate decaying
96 | frac = 1.0 - (update - 1.0) / nupdates
97 | lrnow = self.args.lr_schedule(frac)
98 | cliprangenow = self.args.clip_range_schedule(frac)
99 | num_steps_so_far = update * nbatch
100 |
101 | before_run = time.time()
102 | obs, returns, masks, actions, values, logpacs, epinfos = self.run()
103 | run_time = time.time() - before_run
104 |
105 | epinfobuf.extend(epinfos)
106 | inds = np.arange(nbatch)
107 | mblossvals = []
108 |
109 |
110 | before_update = time.time()
111 | for _ in range(self.args.num_update_epochs):
112 | np.random.shuffle(inds)
113 | # Per mini-batches in one roll-out
114 | for start in range(0, nbatch, nbatch_train):
115 | end = start + nbatch_train
116 | batch_inds = inds[start : end]
117 | slices = (arr[batch_inds] for arr in (obs, returns, masks, actions, values, logpacs))
118 | pg_loss, vf_loss, entropy = self.agent.update(*slices, lrnow, cliprangenow)
119 | mblossvals.append([pg_loss, vf_loss, entropy])
120 | update_time = time.time() - before_update
121 |
122 | # Logging
123 | lossvals = np.mean(mblossvals, axis=0)
124 |
125 | if update % self.args.log_interval == 0 or update == 1:
126 | logger.logkv("Run time", run_time)
127 | logger.logkv("Update time", update_time)
128 | logger.logkv("serial_timestep", update * self.args.nsteps)
129 | logger.logkv("num_updates", update)
130 | logger.logkv("total_timesteps", update * nbatch)
131 | logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
132 | logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
133 | for (lossval, lossname) in zip(lossvals, self.agent.loss_names):
134 | logger.logkv(lossname, lossval)
135 | logger.dumpkvs()
136 |
137 | self.env.close()
138 |
139 |
140 | def safemean(xs):
141 | return np.nan if len(xs) == 0 else np.mean(xs)
142 |
143 |
144 | def test_breakout():
145 | logger.configure('./log', ['stdout', 'tensorboard'])
146 | args = Breakout_Params()
147 |
148 |
149 | nenvs = 8
150 | env = SubprocVecEnv([make_env(i, 'BreakoutNoFrameskip-v4') for i in range(nenvs)])
151 | env = PyTorch_VecFrameStack(env, args.num_stack)
152 |
153 | print(env.observation_space.shape)
154 |
155 | torch.manual_seed(args.seed)
156 | if args.cuda:
157 | torch.cuda.manual_seed(args.seed)
158 |
159 | ppo = PPO_Discrete(env, args)
160 | trainer = Trainer(env, ppo, args)
161 | print('Init success')
162 |
163 | trainer.run()
164 | print('Roll-out success')
165 |
166 | # trainer.learn()
167 | # print('Success')
168 |
169 | if __name__ == "__main__":
170 | test_breakout()
--------------------------------------------------------------------------------
/trainer_plus.py:
--------------------------------------------------------------------------------
1 | import gym
2 | import numpy as np
3 | import time
4 | from params import Breakout_Params, Params
5 | from envs import *
6 | from ppo import *
7 | from collections import deque
8 | import os.path as osp
9 | from utils import *
10 |
11 | from baselines.common.atari_wrappers import make_atari, wrap_deepmind
12 | from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
13 | from baselines.common.vec_env.vec_frame_stack import VecFrameStack
14 | from baselines import bench, logger
15 |
16 | import torch
17 |
18 |
19 | class TrainerPlus(object):
20 | def __init__(self, env, agent, args):
21 | self.args = args
22 | self.env = env
23 | self.agent = agent
24 |
25 | self.dtype = torch.FloatTensor
26 | self.atype = torch.LongTensor
27 | if args.cuda:
28 | self.dtype = torch.cuda.FloatTensor
29 | self.atype = torch.cuda.LongTensor
30 |
31 | self.nenv = env.num_envs
32 | # self.obs = np.zeros((self.nenv,) + env.observation_space.shape)
33 |
34 | self.obs = torch.from_numpy(env.reset()).type(self.dtype) # This is channel first
35 | self.dones = torch.zeros(self.nenv).type(self.dtype)
36 |
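# Pre-allocate the roll-out buffers once (on the GPU when CUDA is available) and reuse them every iteration.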
37 | self.mb_obs = torch.zeros(self.args.nsteps, *self.obs.size()).type(self.dtype)
38 | self.mb_rewards = torch.zeros(self.args.nsteps, self.nenv).type(self.dtype)
39 | self.mb_actions = torch.zeros(self.args.nsteps, self.nenv).type(self.atype)
40 | self.mb_values = torch.zeros(self.args.nsteps, self.nenv).type(self.dtype)
41 | self.mb_dones = torch.zeros(self.args.nsteps, self.nenv).type(self.dtype)
42 | self.mb_logpacs = torch.zeros(self.args.nsteps, self.nenv).type(self.dtype)
43 | self.mb_returns = torch.zeros(self.args.nsteps, self.nenv).type(self.dtype)
44 | self.mb_advs = torch.zeros(self.args.nsteps, self.nenv).type(self.dtype)
45 |
46 |
47 | def run(self):
48 | epinfos = []
49 |
50 | for step in range(self.args.nsteps): # 1 roll-out
51 | values, actions, logpacs = self.agent.step(self.obs)
52 | cpu_actions = actions.data.cpu().numpy().astype(np.int32).reshape((-1))
53 |
54 | self.mb_obs[step].copy_(self.obs)
55 | self.mb_values[step].copy_(values.data.view(-1))
56 | self.mb_actions[step].copy_(actions.data.view(-1))
57 | self.mb_dones[step].copy_(self.dones.view(-1))
58 | self.mb_logpacs[step].copy_(logpacs.data.view(-1))
59 |
60 | obs, rewards, dones, infos = self.env.step(cpu_actions)
61 | self.obs.copy_(torch.from_numpy(obs).type(self.dtype))
62 | self.dones.copy_(torch.from_numpy(dones.astype(int)).type(self.dtype))
63 |
64 | for info in infos:
65 | maybeepinfo = info.get('episode')
66 | if maybeepinfo: epinfos.append(maybeepinfo)
67 | self.mb_rewards[step].copy_(torch.from_numpy(rewards).type(self.dtype))
68 |
69 | last_value, _, _ = self.agent.step(self.obs)
70 | last_value = last_value.data.view(-1)
71 |
72 | # discount / bootstrap off value
73 | lastgaelam = 0
74 |
75 | for t in reversed(range(self.args.nsteps)):
76 | if t == self.args.nsteps - 1:
77 | nextnonterminal = 1.0 - self.dones
78 | nextvalues = last_value
79 | else:
80 | nextnonterminal = 1.0 - self.mb_dones[t+1]
81 | nextvalues = self.mb_values[t+1]
82 | delta = self.mb_rewards[t] + self.args.gamma * nextvalues * nextnonterminal - self.mb_values[t]
83 | lastgaelam = delta + self.args.gamma * self.args.lam * nextnonterminal * lastgaelam
84 | self.mb_advs[t].copy_(lastgaelam)
85 | self.mb_returns.copy_(self.mb_advs + self.mb_values)
86 | return (*map(flatten_env_vec, (self.mb_obs, self.mb_returns, self.mb_dones, self.mb_actions, self.mb_values, self.mb_logpacs)), epinfos)
87 |
88 |
89 | def learn(self):
90 | # Number of samples in one roll-out
91 | nbatch = self.nenv * self.args.nsteps
92 | nbatch_train = nbatch // self.args.nminibatches
93 |
94 | # Total number of steps to run simulation
95 | total_timesteps = self.args.num_timesteps
96 | # Number of times to run optimization
97 | nupdates = int(total_timesteps // nbatch)
98 |
99 | epinfobuf = deque(maxlen=100)
100 |
101 | for update in range(1, nupdates+1):
102 | assert nbatch % self.args.nminibatches == 0
103 |
104 | # Adaptive clip-range and learning-rate decaying
105 | frac = 1.0 - (update - 1.0) / nupdates
106 | lrnow = self.args.lr_schedule(frac)
107 | cliprangenow = self.args.clip_range_schedule(frac)
108 | num_steps_so_far = update * nbatch
109 |
110 | before_run = time.time()
111 | obs, returns, masks, actions, values, logpacs, epinfos = self.run()
112 | run_time = time.time() - before_run
113 |
114 | epinfobuf.extend(epinfos)
115 | inds = np.arange(nbatch)
116 | mblossvals = []
117 |
118 | before_update = time.time()
119 | for _ in range(self.args.num_update_epochs):
120 | np.random.shuffle(inds)
121 | # Per mini-batches in one roll-out
122 | for start in range(0, nbatch, nbatch_train):
123 | end = start + nbatch_train
124 | batch_inds = torch.from_numpy(inds[start : end]).type(self.atype)
125 | slices = (arr[batch_inds] for arr in (obs, returns, masks, actions, values, logpacs))
126 | pg_loss, vf_loss, entropy = self.agent.update(*slices, lrnow, cliprangenow)
127 | mblossvals.append([pg_loss, vf_loss, entropy])
128 | update_time = time.time() - before_update
129 |
130 | # Logging
131 | lossvals = np.mean(mblossvals, axis=0)
132 |
133 | if update % self.args.log_interval == 0 or update == 1:
134 | logger.logkv("Run time", run_time)
135 | logger.logkv("Update time", update_time)
136 | logger.logkv("serial_timestep", update * self.args.nsteps)
137 | logger.logkv("num_updates", update)
138 | logger.logkv("total_timesteps", update * nbatch)
139 | logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
140 | logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
141 | for (lossval, lossname) in zip(lossvals, self.agent.loss_names):
142 | logger.logkv(lossname, lossval)
143 | logger.dumpkvs()
144 |
145 | self.env.close()
146 |
147 |
148 |
149 | def safemean(xs):
150 | return np.nan if len(xs) == 0 else np.mean(xs)
151 |
152 |
153 | def test_breakout():
154 | logger.configure('./log', ['stdout', 'tensorboard'])
155 | args = Breakout_Params()
156 |
157 |
158 | nenvs = 8
159 | env = SubprocVecEnv([make_env(i, 'BreakoutNoFrameskip-v4') for i in range(nenvs)])
160 | env = PyTorch_VecFrameStack(env, args.num_stack)
161 |
162 | torch.manual_seed(args.seed)
163 | if args.cuda:
164 | torch.cuda.manual_seed(args.seed)
165 |
166 | ppo = PPO_Discrete(env, args)
167 | trainer = TrainerPlus(env, ppo, args)
168 | print('Init success')
169 |
170 | # trainer.run()
171 | # print('Roll-out success')
172 |
173 | trainer.learn()
174 | print('Success')
175 |
176 | if __name__ == "__main__":
177 | test_breakout()
178 |
179 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
5 |
6 | def orthogonal(tensor, gain = 1):
7 | if tensor.ndimension() < 2:
8 | raise ValueError("Only tensors with 2 or more dimensions are supported")
9 |
10 | rows = tensor.size(0)
11 | cols = tensor[0].numel()
12 |
13 | flattened = torch.Tensor(rows, cols).normal_(0, 1)
14 |
15 | if rows < cols:
16 | flattened.t_()
17 |
18 |
19 | q, r = torch.qr(flattened)
20 | d = torch.diag(r, 0)
21 | ph = d.sign()
22 |
23 | q *= ph.expand_as(q)
24 |
25 | if rows < cols:
26 | q.t_()
27 |
28 | tensor.view_as(q).copy_(q)
29 | tensor.mul_(gain)
30 | return tensor
31 |
32 | '''
33 | def flatten_env_vec(arr):
34 | s = arr.shape
35 | return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
36 | '''
37 |
38 | def flatten_env_vec(arr):
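# Merge the leading (num_steps, num_envs) dimensions into a single batch dimension.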
39 | s = list(arr.size())
40 | # return torch.transpose(arr, 0, 1).view(s[0] * s[1], *s[2:])
41 | return arr.view(s[0] * s[1], *s[2:])
--------------------------------------------------------------------------------