├── README.md
├── VAE_for_imbalanced_data.ipynb
├── Variational_Autoencoder_data_augmentation.ipynb
└── Create_Autoencoder_Model_Basemodel_3Embeddings.ipynb
/README.md:
--------------------------------------------------------------------------------
1 | # Autoencoder
2 |
3 | I use the famous iris dataset to create an Autoencoder with PyTorch and then show the difference between a PCA projection and the embedding space built by the Autoencoder. The two VAE notebooks in this repository additionally use a variational autoencoder to generate synthetic tabular data, for example to augment the minority class of the highly imbalanced credit-card fraud dataset.
4 |
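A minimal sketch of the idea (illustrative only; the model, names and hyperparameters below are assumptions, not the notebooks' actual code): fit a 2-D PCA as the linear baseline and train a small PyTorch autoencoder whose 2-D bottleneck gives the nonlinear embedding to compare it with.

```python
import torch
import torch.nn as nn
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

X = StandardScaler().fit_transform(load_iris().data)   # 150 samples, 4 features
pca_2d = PCA(n_components=2).fit_transform(X)           # linear baseline projection

class TinyAE(nn.Module):                                 # illustrative model, not the notebook's
    def __init__(self, d_in=4, d_latent=2):
        super().__init__()
        self.enc = nn.Sequential(nn.Linear(d_in, 8), nn.ReLU(), nn.Linear(8, d_latent))
        self.dec = nn.Sequential(nn.Linear(d_latent, 8), nn.ReLU(), nn.Linear(8, d_in))
    def forward(self, x):
        z = self.enc(x)
        return self.dec(z), z

model = TinyAE()
opt = torch.optim.Adam(model.parameters(), lr=1e-2)
xt = torch.tensor(X, dtype=torch.float32)
for _ in range(500):                                     # train on reconstruction error only
    opt.zero_grad()
    recon, _ = model(xt)
    nn.functional.mse_loss(recon, xt).backward()
    opt.step()

ae_2d = model.enc(xt).detach().numpy()                   # nonlinear embedding to compare with pca_2d
```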
--------------------------------------------------------------------------------
/VAE_for_imbalanced_data.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "language_info": {
4 | "codemirror_mode": {
5 | "name": "ipython",
6 | "version": 3
7 | },
8 | "file_extension": ".py",
9 | "mimetype": "text/x-python",
10 | "name": "python",
11 | "nbconvert_exporter": "python",
12 | "pygments_lexer": "ipython3",
13 | "version": "3.9.2-final"
14 | },
15 | "orig_nbformat": 2,
16 | "kernelspec": {
17 | "name": "python3",
18 | "display_name": "Python 3.9.2 64-bit",
19 | "metadata": {
20 | "interpreter": {
21 | "hash": "9139ca13fc640d8623238ac4ed44beace8a76f86a07bab6efe75c2506e18783d"
22 | }
23 | }
24 | }
25 | },
26 | "nbformat": 4,
27 | "nbformat_minor": 2,
28 | "cells": [
29 | {
30 | "cell_type": "code",
31 | "execution_count": 374,
32 | "metadata": {},
33 | "outputs": [],
34 | "source": [
35 | "import torch\n",
36 | "import torch.nn as nn\n",
37 | "import torch.nn.functional as F\n",
38 | "from torch import nn, optim\n",
39 | "from torch.autograd import Variable\n",
40 | "\n",
41 | "import pandas as pd\n",
42 | "import numpy as np\n",
43 | "from sklearn import preprocessing\n",
44 | "from sklearn.model_selection import train_test_split\n",
45 | "import mlprepare as mlp \n",
46 | "from sklearn.ensemble import RandomForestClassifier\n",
47 | "from sklearn.metrics import confusion_matrix"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": 2,
53 | "metadata": {},
54 | "outputs": [
55 | {
56 | "output_type": "execute_result",
57 | "data": {
58 | "text/plain": [
59 | "device(type='cpu')"
60 | ]
61 | },
62 | "metadata": {},
63 | "execution_count": 2
64 | }
65 | ],
66 | "source": [
67 | "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
68 | "device"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 3,
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "DATA_PATH = 'data/creditcard.csv'"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": 327,
83 | "metadata": {},
84 | "outputs": [
85 | {
86 | "output_type": "execute_result",
87 | "data": {
88 | "text/plain": [
89 | " Time V1 V2 V3 V4 V5 V6 V7 \\\n",
90 | "0 0.0 -1.359807 -0.072781 2.536347 1.378155 -0.338321 0.462388 0.239599 \n",
91 | "1 0.0 1.191857 0.266151 0.166480 0.448154 0.060018 -0.082361 -0.078803 \n",
92 | "2 1.0 -1.358354 -1.340163 1.773209 0.379780 -0.503198 1.800499 0.791461 \n",
93 | "3 1.0 -0.966272 -0.185226 1.792993 -0.863291 -0.010309 1.247203 0.237609 \n",
94 | "4 2.0 -1.158233 0.877737 1.548718 0.403034 -0.407193 0.095921 0.592941 \n",
95 | "\n",
96 | " V8 V9 ... V21 V22 V23 V24 V25 \\\n",
97 | "0 0.098698 0.363787 ... -0.018307 0.277838 -0.110474 0.066928 0.128539 \n",
98 | "1 0.085102 -0.255425 ... -0.225775 -0.638672 0.101288 -0.339846 0.167170 \n",
99 | "2 0.247676 -1.514654 ... 0.247998 0.771679 0.909412 -0.689281 -0.327642 \n",
100 | "3 0.377436 -1.387024 ... -0.108300 0.005274 -0.190321 -1.175575 0.647376 \n",
101 | "4 -0.270533 0.817739 ... -0.009431 0.798278 -0.137458 0.141267 -0.206010 \n",
102 | "\n",
103 | " V26 V27 V28 Amount Class \n",
104 | "0 -0.189115 0.133558 -0.021053 149.62 0 \n",
105 | "1 0.125895 -0.008983 0.014724 2.69 0 \n",
106 | "2 -0.139097 -0.055353 -0.059752 378.66 0 \n",
107 | "3 -0.221929 0.062723 0.061458 123.50 0 \n",
108 | "4 0.502292 0.219422 0.215153 69.99 0 \n",
109 | "\n",
110 | "[5 rows x 31 columns]"
111 | ],
112 |      "text/html": "HTML table rendering omitted (same data as the text/plain output above)"
113 | },
114 | "metadata": {},
115 | "execution_count": 327
116 | }
117 | ],
118 | "source": [
119 | "df = pd.read_csv(DATA_PATH, sep=',')\n",
120 | "df.head()"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "df_base = df.copy()"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": 282,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "cols = df_base.columns"
139 | ]
140 | },
141 | {
142 | "source": [
143 | "We need to normalize Time and Amount"
144 | ],
145 | "cell_type": "markdown",
146 | "metadata": {}
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": 184,
151 | "metadata": {},
152 | "outputs": [],
153 | "source": [
154 | "mean_time=df_base['Time'].mean()\n",
155 | "mean_amount=df_base['Amount'].mean()\n",
156 | "std_time=df_base['Time'].std()\n",
157 | "std_amount=df_base['Amount'].std()\n",
158 | "\n",
159 | "df_base['Time']=(df_base['Time']-mean_time)/std_time\n",
160 | "df_base['Amount']=(df_base['Amount']-mean_amount)/std_amount"
161 | ]
162 | },
163 | {
164 | "source": [
165 | "Class=1 means that this was indeed a fraud case, class=0 means no fraud. This dataset is highly imbalanced:"
166 | ],
167 | "cell_type": "markdown",
168 | "metadata": {}
169 | },
170 | {
171 | "cell_type": "code",
172 | "execution_count": 185,
173 | "metadata": {},
174 | "outputs": [
175 | {
176 | "output_type": "execute_result",
177 | "data": {
178 | "text/plain": [
179 | "0 284315\n",
180 | "1 492\n",
181 | "Name: Class, dtype: int64"
182 | ]
183 | },
184 | "metadata": {},
185 | "execution_count": 185
186 | }
187 | ],
188 | "source": [
189 | "df_base['Class'].value_counts()"
190 | ]
191 | },
192 | {
193 | "source": [
194 | "I want to create fake data based on the 492 cases, which I will then use to improve the model. Let's first train a simple RandomForest."
195 | ],
196 | "cell_type": "markdown",
197 | "metadata": {}
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": 186,
202 | "metadata": {},
203 | "outputs": [],
204 | "source": [
205 | "X_train, X_test, y_train, y_test = mlp.split_df(df_base, dep_var='Class', test_size=0.3, split_mode='random')\n"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": 368,
211 | "metadata": {},
212 | "outputs": [
213 | {
214 | "output_type": "execute_result",
215 | "data": {
216 | "text/plain": [
217 | "0 85286\n",
218 | "1 157\n",
219 | "Name: Class, dtype: int64"
220 | ]
221 | },
222 | "metadata": {},
223 | "execution_count": 368
224 | }
225 | ],
226 | "source": [
227 | "y_test.value_counts()"
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "execution_count": 369,
233 | "metadata": {},
234 | "outputs": [
235 | {
236 | "output_type": "execute_result",
237 | "data": {
238 | "text/plain": [
239 | "543.2229299363057"
240 | ]
241 | },
242 | "metadata": {},
243 | "execution_count": 369
244 | }
245 | ],
246 | "source": [
247 | "#Ratio of the two classes:\n",
248 | "y_test.value_counts()[0]/y_test.value_counts()[1]"
249 | ]
250 | },
251 | {
252 | "source": [
253 | "RandomForest with Oversampling"
254 | ],
255 | "cell_type": "markdown",
256 | "metadata": {}
257 | },
258 | {
259 | "cell_type": "code",
260 | "execution_count": 406,
261 | "metadata": {},
262 | "outputs": [],
263 | "source": [
264 | "def rf(xs, y, n_estimators=40, max_samples=500,\n",
265 | " max_features=0.5, min_samples_leaf=5, **kwargs):\n",
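"    # class_weight upweights the fraud class (1) by the ~543:1 imbalance ratio computed above\n",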
266 | " return RandomForestClassifier(n_jobs=-1, n_estimators=n_estimators,\n",
267 | " max_samples=max_samples, max_features=max_features,\n",
268 | " min_samples_leaf=min_samples_leaf, oob_score=True, class_weight={0:1,1:543}).fit(xs, y)"
269 | ]
270 | },
271 | {
272 | "cell_type": "code",
273 | "execution_count": 407,
274 | "metadata": {},
275 | "outputs": [],
276 | "source": [
277 | "m = rf(X_train, y_train)"
278 | ]
279 | },
280 | {
281 | "cell_type": "code",
282 | "execution_count": 408,
283 | "metadata": {},
284 | "outputs": [
285 | {
286 | "output_type": "execute_result",
287 | "data": {
288 | "text/plain": [
289 | "array([[85278, 8],\n",
290 | " [ 118, 39]], dtype=int64)"
291 | ]
292 | },
293 | "metadata": {},
294 | "execution_count": 408
295 | }
296 | ],
297 | "source": [
298 | "confusion_matrix(y_test, np.round(m.predict(X_test)))"
299 | ]
300 | },
301 | {
302 | "source": [
303 | "With this technique we get about 39 out of 157 Fraud cases, although the results vary quite a lot!"
304 | ],
305 | "cell_type": "markdown",
306 | "metadata": {}
307 | },
308 | {
309 | "source": [
310 | "# Fake Data with VAE"
311 | ],
312 | "cell_type": "markdown",
313 | "metadata": {}
314 | },
315 | {
316 | "source": [
317 | "We want only where y_train/test_train =1"
318 | ],
319 | "cell_type": "markdown",
320 | "metadata": {}
321 | },
322 | {
323 | "cell_type": "code",
324 | "execution_count": 264,
325 | "metadata": {},
326 | "outputs": [],
327 | "source": [
328 | "X_train_fraud = X_train.iloc[np.where(y_train==1)[0]]\n",
329 | "X_test_fraud = X_test.iloc[np.where(y_test==1)[0]]"
330 | ]
331 | },
332 | {
333 | "cell_type": "code",
334 | "execution_count": 265,
335 | "metadata": {},
336 | "outputs": [],
337 | "source": [
338 | "from torch.utils.data import Dataset, DataLoader\n",
339 | "class DataBuilder(Dataset):\n",
340 | " def __init__(self, dataset):\n",
341 | " self.x = dataset.values\n",
342 | " self.x = torch.from_numpy(self.x).to(torch.float)\n",
343 | " self.len=self.x.shape[0]\n",
344 | " def __getitem__(self,index): \n",
345 | " return self.x[index]\n",
346 | " def __len__(self):\n",
347 | " return self.len\n"
348 | ]
349 | },
350 | {
351 | "cell_type": "code",
352 | "execution_count": 266,
353 | "metadata": {},
354 | "outputs": [],
355 | "source": [
356 | "traindata_set=DataBuilder(X_train_fraud)\n",
357 | "testdata_set=DataBuilder(X_test_fraud)\n",
358 | "\n",
359 | "trainloader=DataLoader(dataset=traindata_set,batch_size=1024)\n",
360 | "testloader=DataLoader(dataset=testdata_set,batch_size=1024)"
361 | ]
362 | },
363 | {
364 | "cell_type": "code",
365 | "execution_count": 267,
366 | "metadata": {},
367 | "outputs": [],
368 | "source": [
369 | "class Autoencoder(nn.Module):\n",
370 | " def __init__(self,D_in,H=50,H2=12,latent_dim=3):\n",
371 | " \n",
372 | " #Encoder\n",
373 | " super(Autoencoder,self).__init__()\n",
374 | " self.linear1=nn.Linear(D_in,H)\n",
375 | " self.lin_bn1 = nn.BatchNorm1d(num_features=H)\n",
376 | " self.linear2=nn.Linear(H,H2)\n",
377 | " self.lin_bn2 = nn.BatchNorm1d(num_features=H2)\n",
378 | " self.linear3=nn.Linear(H2,H2)\n",
379 | " self.lin_bn3 = nn.BatchNorm1d(num_features=H2)\n",
380 | " \n",
381 | " # Latent vectors mu and sigma\n",
382 | " self.fc1 = nn.Linear(H2, latent_dim)\n",
383 | " self.bn1 = nn.BatchNorm1d(num_features=latent_dim)\n",
384 | " self.fc21 = nn.Linear(latent_dim, latent_dim)\n",
385 | " self.fc22 = nn.Linear(latent_dim, latent_dim)\n",
386 | "\n",
387 | " # Sampling vector\n",
388 | " self.fc3 = nn.Linear(latent_dim, latent_dim)\n",
389 | " self.fc_bn3 = nn.BatchNorm1d(latent_dim)\n",
390 | " self.fc4 = nn.Linear(latent_dim, H2)\n",
391 | " self.fc_bn4 = nn.BatchNorm1d(H2)\n",
392 | " \n",
393 | " # Decoder\n",
394 | " self.linear4=nn.Linear(H2,H2)\n",
395 | " self.lin_bn4 = nn.BatchNorm1d(num_features=H2)\n",
396 | " self.linear5=nn.Linear(H2,H)\n",
397 | " self.lin_bn5 = nn.BatchNorm1d(num_features=H)\n",
398 | " self.linear6=nn.Linear(H,D_in)\n",
399 | " self.lin_bn6 = nn.BatchNorm1d(num_features=D_in)\n",
400 | " \n",
401 | " self.relu = nn.ReLU()\n",
402 | " \n",
403 | " def encode(self, x):\n",
404 | " lin1 = self.relu(self.lin_bn1(self.linear1(x)))\n",
405 | " lin2 = self.relu(self.lin_bn2(self.linear2(lin1)))\n",
406 | " lin3 = self.relu(self.lin_bn3(self.linear3(lin2)))\n",
407 | "\n",
408 | " fc1 = F.relu(self.bn1(self.fc1(lin3)))\n",
409 | "\n",
410 | " r1 = self.fc21(fc1)\n",
411 | " r2 = self.fc22(fc1)\n",
412 | " \n",
413 | " return r1, r2\n",
414 | " \n",
415 | " def reparameterize(self, mu, logvar):\n",
416 | " if self.training:\n",
417 | " std = logvar.mul(0.5).exp_()\n",
418 | " eps = Variable(std.data.new(std.size()).normal_())\n",
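"            # (legacy Variable idiom; torch.randn_like(std) would be the modern equivalent)\n",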
419 | " return eps.mul(std).add_(mu)\n",
420 | " else:\n",
421 | " return mu\n",
422 | " \n",
423 | " def decode(self, z):\n",
424 | " fc3 = self.relu(self.fc_bn3(self.fc3(z)))\n",
425 | " fc4 = self.relu(self.fc_bn4(self.fc4(fc3)))\n",
426 | "\n",
427 | " lin4 = self.relu(self.lin_bn4(self.linear4(fc4)))\n",
428 | " lin5 = self.relu(self.lin_bn5(self.linear5(lin4)))\n",
429 | " return self.lin_bn6(self.linear6(lin5))\n",
430 | "\n",
431 | "\n",
432 | " \n",
433 | " def forward(self, x):\n",
434 | " mu, logvar = self.encode(x)\n",
435 | " z = self.reparameterize(mu, logvar)\n",
436 | " return self.decode(z), mu, logvar"
437 | ]
438 | },
439 | {
440 | "cell_type": "code",
441 | "execution_count": 268,
442 | "metadata": {},
443 | "outputs": [],
444 | "source": [
445 | "class customLoss(nn.Module):\n",
446 | " def __init__(self):\n",
447 | " super(customLoss, self).__init__()\n",
448 | " self.mse_loss = nn.MSELoss(reduction=\"sum\")\n",
449 | " \n",
450 | " # x_recon ist der im forward im Model erstellte recon_batch, x ist der originale x Batch, mu ist mu und logvar ist logvar \n",
451 | " def forward(self, x_recon, x, mu, logvar):\n",
452 | " loss_MSE = self.mse_loss(x_recon, x)\n",
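"        # closed-form KL divergence between N(mu, sigma^2) and the standard normal prior N(0, 1)\n",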
453 | " loss_KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n",
454 | "\n",
455 | " return loss_MSE + loss_KLD"
456 | ]
457 | },
458 | {
459 | "cell_type": "code",
460 | "execution_count": 269,
461 | "metadata": {},
462 | "outputs": [],
463 | "source": [
464 | "D_in = traindata_set.x.shape[1]\n",
465 | "H = 50\n",
466 | "H2 = 12\n",
467 | "model = Autoencoder(D_in, H, H2).to(device)\n",
468 | "optimizer = optim.Adam(model.parameters(), lr=1e-3)"
469 | ]
470 | },
471 | {
472 | "cell_type": "code",
473 | "execution_count": 270,
474 | "metadata": {},
475 | "outputs": [],
476 | "source": [
477 | "loss_mse = customLoss()"
478 | ]
479 | },
480 | {
481 | "source": [
482 | "## Train Model"
483 | ],
484 | "cell_type": "markdown",
485 | "metadata": {}
486 | },
487 | {
488 | "cell_type": "code",
489 | "execution_count": 271,
490 | "metadata": {},
491 | "outputs": [],
492 | "source": [
493 | "log_interval = 50\n",
494 | "val_losses = []\n",
495 | "train_losses = []\n",
496 | "test_losses = []"
497 | ]
498 | },
499 | {
500 | "cell_type": "code",
501 | "execution_count": 272,
502 | "metadata": {},
503 | "outputs": [],
504 | "source": [
505 | "def train(epoch):\n",
506 | " model.train()\n",
507 | " train_loss = 0\n",
508 | " for batch_idx, data in enumerate(trainloader):\n",
509 | " data = data.to(device)\n",
510 | " optimizer.zero_grad()\n",
511 | " recon_batch, mu, logvar = model(data)\n",
512 | " loss = loss_mse(recon_batch, data, mu, logvar)\n",
513 | " loss.backward()\n",
514 | " train_loss += loss.item()\n",
515 | " optimizer.step()\n",
516 | " if epoch % 200 == 0: \n",
517 | " print('====> Epoch: {} Average training loss: {:.4f}'.format(\n",
518 | " epoch, train_loss / len(trainloader.dataset)))\n",
519 | " train_losses.append(train_loss / len(trainloader.dataset))"
520 | ]
521 | },
522 | {
523 | "cell_type": "code",
524 | "execution_count": 273,
525 | "metadata": {},
526 | "outputs": [],
527 | "source": [
528 | "def test(epoch):\n",
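"    # note: model.eval() is never called, so BatchNorm and the reparameterization sampling stay in training mode during testing\n",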
529 | " with torch.no_grad():\n",
530 | " test_loss = 0\n",
531 | " for batch_idx, data in enumerate(testloader):\n",
532 | " data = data.to(device)\n",
533 | " optimizer.zero_grad()\n",
534 | " recon_batch, mu, logvar = model(data)\n",
535 | " loss = loss_mse(recon_batch, data, mu, logvar)\n",
536 | " test_loss += loss.item()\n",
537 | " if epoch % 200 == 0: \n",
538 | " print('====> Epoch: {} Average test loss: {:.4f}'.format(\n",
539 | " epoch, test_loss / len(testloader.dataset)))\n",
540 | " test_losses.append(test_loss / len(testloader.dataset))"
541 | ]
542 | },
543 | {
544 | "cell_type": "code",
545 | "execution_count": 274,
546 | "metadata": {},
547 | "outputs": [
548 | {
549 | "output_type": "stream",
550 | "name": "stdout",
551 | "text": [
552 | "====> Epoch: 200 Average training loss: 706.2121\n",
553 | "====> Epoch: 200 Average test loss: 590.0016\n",
554 | "====> Epoch: 400 Average training loss: 620.5279\n",
555 | "====> Epoch: 400 Average test loss: 521.3142\n",
556 | "====> Epoch: 600 Average training loss: 566.4392\n",
557 | "====> Epoch: 600 Average test loss: 477.5008\n",
558 | "====> Epoch: 800 Average training loss: 521.7474\n",
559 | "====> Epoch: 800 Average test loss: 440.3243\n",
560 | "====> Epoch: 1000 Average training loss: 481.2092\n",
561 | "====> Epoch: 1000 Average test loss: 407.7625\n",
562 | "====> Epoch: 1200 Average training loss: 434.3898\n",
563 | "====> Epoch: 1200 Average test loss: 362.2760\n",
564 | "====> Epoch: 1400 Average training loss: 396.9551\n",
565 | "====> Epoch: 1400 Average test loss: 343.7408\n"
566 | ]
567 | }
568 | ],
569 | "source": [
570 | "epochs = 1500\n",
571 | "for epoch in range(1, epochs + 1):\n",
572 | " train(epoch)\n",
573 | " test(epoch)"
574 | ]
575 | },
576 | {
577 | "source": [
578 | "We're still improving so keep going "
579 | ],
580 | "cell_type": "markdown",
581 | "metadata": {}
582 | },
583 | {
584 | "cell_type": "code",
585 | "execution_count": 275,
586 | "metadata": {},
587 | "outputs": [
588 | {
589 | "output_type": "stream",
590 | "name": "stdout",
591 | "text": [
592 | "====> Epoch: 200 Average training loss: 343.3472\n",
593 | "====> Epoch: 200 Average test loss: 300.3575\n",
594 | "====> Epoch: 400 Average training loss: 310.5800\n",
595 | "====> Epoch: 400 Average test loss: 285.6697\n",
596 | "====> Epoch: 600 Average training loss: 281.8408\n",
597 | "====> Epoch: 600 Average test loss: 263.7150\n",
598 | "====> Epoch: 800 Average training loss: 256.1950\n",
599 | "====> Epoch: 800 Average test loss: 244.9427\n",
600 | "====> Epoch: 1000 Average training loss: 232.6077\n",
601 | "====> Epoch: 1000 Average test loss: 236.3014\n",
602 | "====> Epoch: 1200 Average training loss: 211.2899\n",
603 | "====> Epoch: 1200 Average test loss: 217.6404\n",
604 | "====> Epoch: 1400 Average training loss: 191.3525\n",
605 | "====> Epoch: 1400 Average test loss: 205.8287\n",
606 | "====> Epoch: 1600 Average training loss: 174.0826\n",
607 | "====> Epoch: 1600 Average test loss: 189.0589\n",
608 | "====> Epoch: 1800 Average training loss: 157.4292\n",
609 | "====> Epoch: 1800 Average test loss: 175.6006\n",
610 | "====> Epoch: 2000 Average training loss: 143.2475\n",
611 | "====> Epoch: 2000 Average test loss: 177.1668\n",
612 | "====> Epoch: 2200 Average training loss: 129.9684\n",
613 | "====> Epoch: 2200 Average test loss: 160.4641\n",
614 | "====> Epoch: 2400 Average training loss: 117.6745\n",
615 | "====> Epoch: 2400 Average test loss: 150.9483\n"
616 | ]
617 | }
618 | ],
619 | "source": [
620 | "epochs = 2500\n",
621 | "optimizer = optim.Adam(model.parameters(), lr=1e-3)\n",
622 | "for epoch in range(1, epochs + 1):\n",
623 | " train(epoch)\n",
624 | " test(epoch)"
625 | ]
626 | },
627 | {
628 | "cell_type": "code",
629 | "execution_count": 278,
630 | "metadata": {},
631 | "outputs": [
632 | {
633 | "output_type": "stream",
634 | "name": "stdout",
635 | "text": [
636 | "====> Epoch: 200 Average training loss: 54.6816\n",
637 | "====> Epoch: 200 Average test loss: 129.6853\n",
638 | "====> Epoch: 400 Average training loss: 48.5159\n",
639 | "====> Epoch: 400 Average test loss: 134.4429\n"
640 | ]
641 | }
642 | ],
643 | "source": [
644 | "epochs = 500\n",
645 | "optimizer = optim.Adam(model.parameters(), lr=1e-3)\n",
646 | "for epoch in range(1, epochs + 1):\n",
647 | " train(epoch)\n",
648 | " test(epoch)"
649 | ]
650 | },
651 | {
652 | "source": [
653 | "Let's look at the results:"
654 | ],
655 | "cell_type": "markdown",
656 | "metadata": {}
657 | },
658 | {
659 | "cell_type": "code",
660 | "execution_count": 279,
661 | "metadata": {},
662 | "outputs": [],
663 | "source": [
664 | "with torch.no_grad():\n",
665 | " for batch_idx, data in enumerate(testloader):\n",
666 | " data = data.to(device)\n",
667 | " optimizer.zero_grad()\n",
668 | " recon_batch, mu, logvar = model(data)"
669 | ]
670 | },
671 | {
672 | "cell_type": "code",
673 | "execution_count": 288,
674 | "metadata": {},
675 | "outputs": [],
676 | "source": [
677 | "recon_row = recon_batch[0].cpu().numpy()\n",
678 | "recon_row = np.append(recon_row, [1])\n",
679 | "real_row = testloader.dataset.x[0].cpu().numpy()\n",
680 | "real_row = np.append(real_row, [1])"
681 | ]
682 | },
683 | {
684 | "cell_type": "code",
685 | "execution_count": 290,
686 | "metadata": {},
687 | "outputs": [
688 | {
689 | "output_type": "execute_result",
690 | "data": {
691 | "text/plain": [
692 | " Time V1 V2 V3 V4 V5 V6 \\\n",
693 | "0 -0.196971 -7.667089 5.699276 -10.15090 10.077229 -7.307253 -2.589641 \n",
694 | "1 0.910404 -5.839191 7.151532 -12.81676 7.031115 -9.651272 -2.938427 \n",
695 | "\n",
696 | " V7 V8 V9 ... V21 V22 V23 V24 \\\n",
697 | "0 -9.824335 3.019747 -7.658296 ... 1.073921 0.034662 0.247951 0.00464 \n",
698 | "1 -11.543207 4.843626 -3.494276 ... 2.462056 1.054865 0.530481 0.47267 \n",
699 | "\n",
700 | " V25 V26 V27 V28 Amount Class \n",
701 | "0 -0.037674 0.597619 0.763070 -0.609457 -0.377716 1.0 \n",
702 | "1 -0.275998 0.282435 0.104886 0.254417 0.910404 1.0 \n",
703 | "\n",
704 | "[2 rows x 31 columns]"
705 | ],
706 |      "text/html": "HTML table rendering omitted (same data as the text/plain output above)"
707 | },
708 | "metadata": {},
709 | "execution_count": 290
710 | }
711 | ],
712 | "source": [
713 | "df = pd.DataFrame(np.stack((recon_row, real_row)), columns = cols)\n",
714 | "df"
715 | ]
716 | },
717 | {
718 | "cell_type": "code",
719 | "execution_count": 293,
720 | "metadata": {},
721 | "outputs": [],
722 | "source": [
723 | "sigma = torch.exp(logvar/2)"
724 | ]
725 | },
726 | {
727 | "cell_type": "code",
728 | "execution_count": 294,
729 | "metadata": {},
730 | "outputs": [
731 | {
732 | "output_type": "execute_result",
733 | "data": {
734 | "text/plain": [
735 | "(tensor([0.0001, 0.0163, 0.0400]), tensor([0.9976, 0.0370, 0.0381]))"
736 | ]
737 | },
738 | "metadata": {},
739 | "execution_count": 294
740 | }
741 | ],
742 | "source": [
743 | "mu.mean(axis=0), sigma.mean(axis=0)"
744 | ]
745 | },
746 | {
747 | "cell_type": "code",
748 | "execution_count": 295,
749 | "metadata": {},
750 | "outputs": [],
751 | "source": [
752 | "# sample z from q\n",
753 | "no_samples = 20\n",
754 | "q = torch.distributions.Normal(mu.mean(axis=0), sigma.mean(axis=0))\n",
755 | "z = q.rsample(sample_shape=torch.Size([no_samples]))"
756 | ]
757 | },
758 | {
759 | "cell_type": "code",
760 | "execution_count": 318,
761 | "metadata": {},
762 | "outputs": [],
763 | "source": [
764 | "with torch.no_grad():\n",
765 | " pred = model.decode(z).cpu().numpy()"
766 | ]
767 | },
768 | {
769 | "cell_type": "code",
770 | "execution_count": 324,
771 | "metadata": {},
772 | "outputs": [
773 | {
774 | "output_type": "execute_result",
775 | "data": {
776 | "text/plain": [
777 | " Time V1 V2 V3 V4 V5 V6 \\\n",
778 | "0 -1.014143 1.505616 -4.616234 7.718655 -0.977422 8.594662 -3.198405 \n",
779 | "1 -1.810440 -13.005595 1.212420 5.370727 2.069537 -1.141557 -3.816671 \n",
780 | "2 -1.152523 12.006341 -3.014931 4.485871 -1.155190 10.059814 -3.355832 \n",
781 | "3 0.228914 -5.935965 -1.644437 -6.354884 7.788726 -0.055751 -1.726003 \n",
782 | "4 0.180823 -3.444491 4.722339 -4.571048 4.998073 -4.543203 -0.816252 \n",
783 | "\n",
784 | " V7 V8 V9 ... V21 V22 V23 V24 \\\n",
785 | "0 -6.944025 -5.043085 2.561653 ... 1.094700 0.510489 -1.254657 -0.085117 \n",
786 | "1 -6.958980 4.140651 -1.208175 ... 0.902933 -0.573067 1.209823 0.543091 \n",
787 | "2 -8.342437 -8.336978 2.741910 ... -0.101801 1.417866 -2.335097 0.034988 \n",
788 | "3 0.577209 1.638260 -5.880371 ... -5.350942 2.994604 -0.079382 -1.020990 \n",
789 | "4 -5.482205 3.643872 -4.685173 ... -1.748235 1.525022 0.258438 -0.465014 \n",
790 | "\n",
791 | " V25 V26 V27 V28 Amount Class \n",
792 | "0 0.283567 -0.268765 3.025049 0.929408 -79.125496 1 \n",
793 | "1 0.666637 -0.524895 0.204588 -0.074243 -380.632935 1 \n",
794 | "2 -0.466923 -0.012957 2.653872 1.081970 -163.960175 1 \n",
795 | "3 -0.090167 0.395981 -1.590370 -1.090804 9.417862 1 \n",
796 | "4 0.064509 0.277528 1.127516 0.161839 171.483337 1 \n",
797 | "\n",
798 | "[5 rows x 31 columns]"
799 | ],
800 |      "text/html": "HTML table rendering omitted (same data as the text/plain output above)"
801 | },
802 | "metadata": {},
803 | "execution_count": 324
804 | }
805 | ],
806 | "source": [
807 | "df_fake = pd.DataFrame(pred)\n",
808 | "df_fake['Class']=1\n",
809 | "df_fake.columns = cols\n",
810 | "df_fake['Class'] = np.round(df_fake['Class']).astype(int)\n",
811 | "df_fake['Time'] = (df_fake['Time']*std_time)+mean_time\n",
812 | "df_fake['Amount'] = (df_fake['Amount']*std_amount)+mean_amount\n",
813 | "df_fake.head()"
814 | ]
815 | },
816 | {
817 | "cell_type": "code",
818 | "execution_count": 325,
819 | "metadata": {},
820 | "outputs": [
821 | {
822 | "output_type": "execute_result",
823 | "data": {
824 | "text/plain": [
825 | "121.77293"
826 | ]
827 | },
828 | "metadata": {},
829 | "execution_count": 325
830 | }
831 | ],
832 | "source": [
833 | "df_fake['Amount'].mean()"
834 | ]
835 | },
836 | {
837 | "cell_type": "code",
838 | "execution_count": 338,
839 | "metadata": {},
840 | "outputs": [
841 | {
842 | "output_type": "execute_result",
843 | "data": {
844 | "text/plain": [
845 | "Class\n",
846 | "0 88.291022\n",
847 | "1 122.211321\n",
848 | "Name: Amount, dtype: float64"
849 | ]
850 | },
851 | "metadata": {},
852 | "execution_count": 338
853 | }
854 | ],
855 | "source": [
856 | "df.groupby('Class').mean()['Amount']"
857 | ]
858 | },
859 | {
860 | "source": [
861 | "Use fake data for oversampling in RandomForest"
862 | ],
863 | "cell_type": "markdown",
864 | "metadata": {}
865 | },
866 | {
867 | "cell_type": "code",
868 | "execution_count": 344,
869 | "metadata": {},
870 | "outputs": [
871 | {
872 | "output_type": "execute_result",
873 | "data": {
874 | "text/plain": [
875 | "0 199029\n",
876 | "1 335\n",
877 | "Name: Class, dtype: int64"
878 | ]
879 | },
880 | "metadata": {},
881 | "execution_count": 344
882 | }
883 | ],
884 | "source": [
885 | "y_train.value_counts()"
886 | ]
887 | },
888 | {
889 | "source": [
890 | "So let's build about 190.000 fake fraud detection cases:"
891 | ],
892 | "cell_type": "markdown",
893 | "metadata": {}
894 | },
895 | {
896 | "cell_type": "code",
897 | "execution_count": 346,
898 | "metadata": {},
899 | "outputs": [],
900 | "source": [
901 | "no_samples = 190_000\n",
902 | "q = torch.distributions.Normal(mu.mean(axis=0), sigma.mean(axis=0))\n",
903 | "z = q.rsample(sample_shape=torch.Size([no_samples]))"
904 | ]
905 | },
906 | {
907 | "cell_type": "code",
908 | "execution_count": 347,
909 | "metadata": {},
910 | "outputs": [],
911 | "source": [
912 | "with torch.no_grad():\n",
913 | " pred = model.decode(z).cpu().numpy()"
914 | ]
915 | },
916 | {
917 | "source": [
918 | "Concat to our X_train:"
919 | ],
920 | "cell_type": "markdown",
921 | "metadata": {}
922 | },
923 | {
924 | "cell_type": "code",
925 | "execution_count": 365,
926 | "metadata": {},
927 | "outputs": [
928 | {
929 | "output_type": "execute_result",
930 | "data": {
931 | "text/plain": [
932 | "(389364, 30)"
933 | ]
934 | },
935 | "metadata": {},
936 | "execution_count": 365
937 | }
938 | ],
939 | "source": [
940 | "X_train_augmented = np.vstack((X_train.values, pred))\n",
941 | "y_train_augmented = np.append(y_train.values, np.repeat(1,no_samples))\n",
942 | "X_train_augmented.shape"
943 | ]
944 | },
945 | {
946 | "source": [
947 | "We now have roughly as many fraud cases as we have non-fraud cases. "
948 | ],
949 | "cell_type": "markdown",
950 | "metadata": {}
951 | },
952 | {
953 | "source": [
954 | "## Train RandomForest"
955 | ],
956 | "cell_type": "markdown",
957 | "metadata": {}
958 | },
959 | {
960 | "cell_type": "code",
961 | "execution_count": 409,
962 | "metadata": {},
963 | "outputs": [],
964 | "source": [
965 | "def rf_aug(xs, y, n_estimators=40, max_samples=500,\n",
966 | " max_features=0.5, min_samples_leaf=5, **kwargs):\n",
967 | " return RandomForestClassifier(n_jobs=-1, n_estimators=n_estimators,\n",
968 | " max_samples=max_samples, max_features=max_features,\n",
969 | " min_samples_leaf=min_samples_leaf, oob_score=True).fit(xs, y)"
970 | ]
971 | },
972 | {
973 | "cell_type": "code",
974 | "execution_count": 412,
975 | "metadata": {},
976 | "outputs": [
977 | {
978 | "output_type": "execute_result",
979 | "data": {
980 | "text/plain": [
981 | "array([[84963, 323],\n",
982 | " [ 30, 127]], dtype=int64)"
983 | ]
984 | },
985 | "metadata": {},
986 | "execution_count": 412
987 | }
988 | ],
989 | "source": [
990 | "m_aug = rf_aug(X_train_augmented, y_train_augmented)\n",
991 | "confusion_matrix(y_test, np.round(m_aug.predict(X_test)))"
992 | ]
993 | },
994 | {
995 | "cell_type": "code",
996 | "execution_count": 413,
997 | "metadata": {},
998 | "outputs": [
999 | {
1000 | "output_type": "execute_result",
1001 | "data": {
1002 | "text/plain": [
1003 | "array([[85278, 8],\n",
1004 | " [ 118, 39]], dtype=int64)"
1005 | ]
1006 | },
1007 | "metadata": {},
1008 | "execution_count": 413
1009 | }
1010 | ],
1011 | "source": [
1012 | "confusion_matrix(y_test, np.round(m.predict(X_test)))"
1013 | ]
1014 | },
1015 | {
1016 | "source": [
1017 | "Look at that! We managed to find 127 out of 157! "
1018 | ],
1019 | "cell_type": "markdown",
1020 | "metadata": {}
1021 | },
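{
"source": [
 "A quick added sanity check (not part of the original notebook): computing recall and precision from the two confusion matrices shown above makes the trade-off explicit. The augmented model finds far more fraud cases but also raises many more false alarms. The numbers below are copied from the printed matrices."
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# numbers copied from the confusion matrices above (rows = true class, columns = predicted class)\n",
"tn, fp, fn, tp = 84963, 323, 30, 127     # augmented model\n",
"recall = tp / (tp + fn)                  # 127 / 157 ~ 0.81 (baseline: 39 / 157 ~ 0.25)\n",
"precision = tp / (tp + fp)               # 127 / 450 ~ 0.28 (baseline: 39 / 47 ~ 0.83)\n",
"print(f'recall={recall:.2f}, precision={precision:.2f}')"
]
},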
1022 | {
1023 | "cell_type": "code",
1024 | "execution_count": null,
1025 | "metadata": {},
1026 | "outputs": [],
1027 | "source": []
1028 | }
1029 | ]
1030 | }
--------------------------------------------------------------------------------
/Variational_Autoencoder_data_augmentation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "language_info": {
4 | "codemirror_mode": {
5 | "name": "ipython",
6 | "version": 3
7 | },
8 | "file_extension": ".py",
9 | "mimetype": "text/x-python",
10 | "name": "python",
11 | "nbconvert_exporter": "python",
12 | "pygments_lexer": "ipython3",
13 | "version": "3.9.2-final"
14 | },
15 | "orig_nbformat": 2,
16 | "kernelspec": {
17 | "name": "python3",
18 | "display_name": "Python 3.9.2 64-bit ('deeplearning_venv': venv)",
19 | "metadata": {
20 | "interpreter": {
21 | "hash": "9139ca13fc640d8623238ac4ed44beace8a76f86a07bab6efe75c2506e18783d"
22 | }
23 | }
24 | }
25 | },
26 | "nbformat": 4,
27 | "nbformat_minor": 2,
28 | "cells": [
29 | {
30 | "source": [
31 | "# Variational Autoencoder"
32 | ],
33 | "cell_type": "markdown",
34 | "metadata": {}
35 | },
36 | {
37 | "source": [
38 | "## How to create fake tabular data to enhance machine learning algorithms"
39 | ],
40 | "cell_type": "markdown",
41 | "metadata": {}
42 | },
43 | {
44 | "source": [
45 | "To train deeplearning models the more data the better. When we're thinking of image data, the deeplearnig community thought about a lot of tricks how to enhance the model given a dataset of images: image enhancement. Meaning that by rotating, flipping, blurring etc the image we can create more input data and also improve our model. \n",
46 | "\n",
47 | "Hoever, when thinking about tabular data, only few of these techniques exist. In this notebook I want to show you how to create a variational autoencoder to make use of data enhancement. I will create fake data, which is sampled from the learned distribution of the underlying data. "
48 | ],
49 | "cell_type": "markdown",
50 | "metadata": {}
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": 215,
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "import torch\n",
59 | "import torch.nn as nn\n",
60 | "import torch.nn.functional as F\n",
61 | "from torch import nn, optim\n",
62 | "from torch.autograd import Variable\n",
63 | "from sklearn.decomposition import PCA\n",
64 | "\n",
65 | "import pandas as pd\n",
66 | "import numpy as np\n",
67 | "from sklearn import preprocessing\n",
68 | "from sklearn.model_selection import train_test_split"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 216,
74 | "metadata": {},
75 | "outputs": [
76 | {
77 | "output_type": "execute_result",
78 | "data": {
79 | "text/plain": [
80 | "device(type='cpu')"
81 | ]
82 | },
83 | "metadata": {},
84 | "execution_count": 216
85 | }
86 | ],
87 | "source": [
88 | "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
89 | "device"
90 | ]
91 | },
92 | {
93 | "source": [
94 | "### Define path to dataset"
95 | ],
96 | "cell_type": "markdown",
97 | "metadata": {}
98 | },
99 | {
100 | "cell_type": "code",
101 | "execution_count": 217,
102 | "metadata": {},
103 | "outputs": [],
104 | "source": [
105 | "DATA_PATH = 'data/wine.csv'"
106 | ]
107 | },
108 | {
109 | "source": [
110 | "## Dataset Overview"
111 | ],
112 | "cell_type": "markdown",
113 | "metadata": {}
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": 440,
118 | "metadata": {},
119 | "outputs": [
120 | {
121 | "output_type": "execute_result",
122 | "data": {
123 | "text/plain": [
124 | " Wine Alcohol Malic.acid Ash Acl Mg Phenols Flavanoids \\\n",
125 | "0 1 14.23 1.71 2.43 15.6 127 2.80 3.06 \n",
126 | "1 1 13.20 1.78 2.14 11.2 100 2.65 2.76 \n",
127 | "2 1 13.16 2.36 2.67 18.6 101 2.80 3.24 \n",
128 | "3 1 14.37 1.95 2.50 16.8 113 3.85 3.49 \n",
129 | "4 1 13.24 2.59 2.87 21.0 118 2.80 2.69 \n",
130 | "\n",
131 | " Nonflavanoid.phenols Proanth Color.int Hue OD Proline \n",
132 | "0 0.28 2.29 5.64 1.04 3.92 1065 \n",
133 | "1 0.26 1.28 4.38 1.05 3.40 1050 \n",
134 | "2 0.30 2.81 5.68 1.03 3.17 1185 \n",
135 | "3 0.24 2.18 7.80 0.86 3.45 1480 \n",
136 | "4 0.39 1.82 4.32 1.04 2.93 735 "
137 | ],
138 |      "text/html": "HTML table rendering omitted (same data as the text/plain output above)"
139 | },
140 | "metadata": {},
141 | "execution_count": 440
142 | }
143 | ],
144 | "source": [
145 | "df_base = pd.read_csv(DATA_PATH, sep=',')\n",
146 | "df_base.head()"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": 441,
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "cols = df_base.columns"
156 | ]
157 | },
158 | {
159 | "source": [
160 | "## Build Data Loader"
161 | ],
162 | "cell_type": "markdown",
163 | "metadata": {}
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": 222,
168 | "metadata": {},
169 | "outputs": [],
170 | "source": [
171 | "def load_and_standardize_data(path):\n",
172 | " # read in from csv\n",
173 | " df = pd.read_csv(path, sep=',')\n",
174 | " # replace nan with -99\n",
175 | " df = df.fillna(-99)\n",
176 | " df = df.values.reshape(-1, df.shape[1]).astype('float32')\n",
177 | " # randomly split\n",
178 | " X_train, X_test = train_test_split(df, test_size=0.3, random_state=42)\n",
179 | " # standardize values\n",
180 | " scaler = preprocessing.StandardScaler()\n",
181 | " X_train = scaler.fit_transform(X_train)\n",
182 | " X_test = scaler.transform(X_test) \n",
183 | " return X_train, X_test, scaler"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": 223,
189 | "metadata": {},
190 | "outputs": [],
191 | "source": [
192 | "from torch.utils.data import Dataset, DataLoader\n",
193 | "class DataBuilder(Dataset):\n",
194 | " def __init__(self, path, train=True):\n",
195 | " self.X_train, self.X_test, self.standardizer = load_and_standardize_data(DATA_PATH)\n",
196 | " if train:\n",
197 | " self.x = torch.from_numpy(self.X_train)\n",
198 | " self.len=self.x.shape[0]\n",
199 | " else:\n",
200 | " self.x = torch.from_numpy(self.X_test)\n",
201 | " self.len=self.x.shape[0]\n",
202 | " del self.X_train\n",
203 | " del self.X_test \n",
204 | " def __getitem__(self,index): \n",
205 | " return self.x[index]\n",
206 | " def __len__(self):\n",
207 | " return self.len"
208 | ]
209 | },
210 | {
211 | "cell_type": "code",
212 | "execution_count": 224,
213 | "metadata": {},
214 | "outputs": [],
215 | "source": [
216 | "traindata_set=DataBuilder(DATA_PATH, train=True)\n",
217 | "testdata_set=DataBuilder(DATA_PATH, train=False)\n",
218 | "\n",
219 | "trainloader=DataLoader(dataset=traindata_set,batch_size=1024)\n",
220 | "testloader=DataLoader(dataset=testdata_set,batch_size=1024)"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": 225,
226 | "metadata": {},
227 | "outputs": [
228 | {
229 | "output_type": "execute_result",
230 | "data": {
231 | "text/plain": [
232 | "(torch.Tensor, torch.Tensor)"
233 | ]
234 | },
235 | "metadata": {},
236 | "execution_count": 225
237 | }
238 | ],
239 | "source": [
240 | "type(trainloader.dataset.x), type(testloader.dataset.x)"
241 | ]
242 | },
243 | {
244 | "cell_type": "code",
245 | "execution_count": 226,
246 | "metadata": {},
247 | "outputs": [
248 | {
249 | "output_type": "execute_result",
250 | "data": {
251 | "text/plain": [
252 | "(torch.Size([124, 14]), torch.Size([54, 14]))"
253 | ]
254 | },
255 | "metadata": {},
256 | "execution_count": 226
257 | }
258 | ],
259 | "source": [
260 | "trainloader.dataset.x.shape, testloader.dataset.x.shape"
261 | ]
262 | },
263 | {
264 | "cell_type": "code",
265 | "execution_count": 227,
266 | "metadata": {},
267 | "outputs": [
268 | {
269 | "output_type": "execute_result",
270 | "data": {
271 | "text/plain": [
272 | "tensor([[ 1.3598, 0.6284, 1.0812, ..., -0.6414, -1.0709, -0.5182],\n",
273 | " [ 0.0628, -0.5409, -0.6130, ..., 0.3465, 1.3308, -0.2151],\n",
274 | " [ 0.0628, -0.7557, -1.2870, ..., 0.4324, -0.3984, 0.0420],\n",
275 | " ...,\n",
276 | " [-1.2343, 1.6904, -0.4855, ..., 1.0338, 0.5485, 2.6682],\n",
277 | " [ 0.0628, -0.3261, -0.7952, ..., 0.0029, -0.7415, -0.7983],\n",
278 | " [ 0.0628, -0.7437, 0.0428, ..., -0.6843, 1.0700, -0.9861]])"
279 | ]
280 | },
281 | "metadata": {},
282 | "execution_count": 227
283 | }
284 | ],
285 | "source": [
286 | "trainloader.dataset.x"
287 | ]
288 | },
289 | {
290 | "cell_type": "code",
291 | "execution_count": 228,
292 | "metadata": {},
293 | "outputs": [
294 | {
295 | "output_type": "execute_result",
296 | "data": {
297 | "text/plain": [
298 | ""
299 | ]
300 | },
301 | "metadata": {},
302 | "execution_count": 228
303 | }
304 | ],
305 | "source": [
306 | "trainloader.dataset.standardizer.inverse_transform"
307 | ]
308 | },
309 | {
310 | "source": [
311 | "## Build model"
312 | ],
313 | "cell_type": "markdown",
314 | "metadata": {}
315 | },
316 | {
317 | "cell_type": "code",
318 | "execution_count": 229,
319 | "metadata": {},
320 | "outputs": [],
321 | "source": [
322 | "class Autoencoder(nn.Module):\n",
323 | " def __init__(self,D_in,H=50,H2=12,latent_dim=3):\n",
324 | " \n",
325 | " #Encoder\n",
326 | " super(Autoencoder,self).__init__()\n",
327 | " self.linear1=nn.Linear(D_in,H)\n",
328 | " self.lin_bn1 = nn.BatchNorm1d(num_features=H)\n",
329 | " self.linear2=nn.Linear(H,H2)\n",
330 | " self.lin_bn2 = nn.BatchNorm1d(num_features=H2)\n",
331 | " self.linear3=nn.Linear(H2,H2)\n",
332 | " self.lin_bn3 = nn.BatchNorm1d(num_features=H2)\n",
333 | " \n",
334 | "# # Latent vectors mu and sigma\n",
335 | " self.fc1 = nn.Linear(H2, latent_dim)\n",
336 | " self.bn1 = nn.BatchNorm1d(num_features=latent_dim)\n",
337 | " self.fc21 = nn.Linear(latent_dim, latent_dim)\n",
338 | " self.fc22 = nn.Linear(latent_dim, latent_dim)\n",
339 | "\n",
340 | "# # Sampling vector\n",
341 | " self.fc3 = nn.Linear(latent_dim, latent_dim)\n",
342 | " self.fc_bn3 = nn.BatchNorm1d(latent_dim)\n",
343 | " self.fc4 = nn.Linear(latent_dim, H2)\n",
344 | " self.fc_bn4 = nn.BatchNorm1d(H2)\n",
345 | " \n",
346 | "# # Decoder\n",
347 | " self.linear4=nn.Linear(H2,H2)\n",
348 | " self.lin_bn4 = nn.BatchNorm1d(num_features=H2)\n",
349 | " self.linear5=nn.Linear(H2,H)\n",
350 | " self.lin_bn5 = nn.BatchNorm1d(num_features=H)\n",
351 | " self.linear6=nn.Linear(H,D_in)\n",
352 | " self.lin_bn6 = nn.BatchNorm1d(num_features=D_in)\n",
353 | " \n",
354 | " self.relu = nn.ReLU()\n",
355 | " \n",
356 | " def encode(self, x):\n",
357 | " lin1 = self.relu(self.lin_bn1(self.linear1(x)))\n",
358 | " lin2 = self.relu(self.lin_bn2(self.linear2(lin1)))\n",
359 | " lin3 = self.relu(self.lin_bn3(self.linear3(lin2)))\n",
360 | "\n",
361 | " fc1 = F.relu(self.bn1(self.fc1(lin3)))\n",
362 | "\n",
363 | " r1 = self.fc21(fc1)\n",
364 | " r2 = self.fc22(fc1)\n",
365 | " \n",
366 | " return r1, r2\n",
367 | " \n",
368 | " def reparameterize(self, mu, logvar):\n",
369 | " if self.training:\n",
370 | " std = logvar.mul(0.5).exp_()\n",
371 | " eps = Variable(std.data.new(std.size()).normal_())\n",
372 | " return eps.mul(std).add_(mu)\n",
373 | " else:\n",
374 | " return mu\n",
375 | " \n",
376 | " def decode(self, z):\n",
377 | " fc3 = self.relu(self.fc_bn3(self.fc3(z)))\n",
378 | " fc4 = self.relu(self.fc_bn4(self.fc4(fc3)))\n",
379 | "\n",
380 | " lin4 = self.relu(self.lin_bn4(self.linear4(fc4)))\n",
381 | " lin5 = self.relu(self.lin_bn5(self.linear5(lin4)))\n",
382 | " return self.lin_bn6(self.linear6(lin5))\n",
383 | "\n",
384 | "\n",
385 | " \n",
386 | " def forward(self, x):\n",
387 | " mu, logvar = self.encode(x)\n",
388 | " z = self.reparameterize(mu, logvar)\n",
389 | " # self.decode(z) ist später recon_batch, mu ist mu und logvar ist logvar\n",
390 | " return self.decode(z), mu, logvar"
391 | ]
392 | },
393 | {
394 | "cell_type": "code",
395 | "execution_count": 230,
396 | "metadata": {},
397 | "outputs": [],
398 | "source": [
399 | "class customLoss(nn.Module):\n",
400 | " def __init__(self):\n",
401 | " super(customLoss, self).__init__()\n",
402 | " self.mse_loss = nn.MSELoss(reduction=\"sum\")\n",
403 | " \n",
404 | " # x_recon ist der im forward im Model erstellte recon_batch, x ist der originale x Batch, mu ist mu und logvar ist logvar \n",
405 | " def forward(self, x_recon, x, mu, logvar):\n",
406 | " loss_MSE = self.mse_loss(x_recon, x)\n",
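"        # closed-form KL divergence between N(mu, sigma^2) and the standard normal prior N(0, 1)\n",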
407 | " loss_KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n",
408 | "\n",
409 | " return loss_MSE + loss_KLD"
410 | ]
411 | },
412 | {
413 | "cell_type": "code",
414 | "execution_count": 231,
415 | "metadata": {},
416 | "outputs": [],
417 | "source": [
418 | "# takes in a module and applies the specified weight initialization\n",
419 | "def weights_init_uniform_rule(m):\n",
420 | " classname = m.__class__.__name__\n",
421 | " # for every Linear layer in a model..\n",
422 | " if classname.find('Linear') != -1:\n",
423 | " # get the number of the inputs\n",
424 | " n = m.in_features\n",
425 | " y = 1.0/np.sqrt(n)\n",
426 | " m.weight.data.uniform_(-y, y)\n",
427 | " m.bias.data.fill_(0)"
428 | ]
429 | },
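{
"source": [
 "Note that `weights_init_uniform_rule` is not called in the cells shown here. Assuming you want to use it, you would apply it right after creating the model below, e.g. `model.apply(weights_init_uniform_rule)`, so that every `nn.Linear` layer gets the uniform initialization."
],
"cell_type": "markdown",
"metadata": {}
},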
430 | {
431 | "source": [
432 | "If you want to better understand the variational autoencoder technique, look [here](https://towardsdatascience.com/understanding-variational-autoencoders-vaes-f70510919f73).\n",
433 | "\n",
434 | "For better understanding this AutoencoderClass, let me go briefly through it. This is a variational autoencoder (VAE) with two hidden layers, which (by default, but you can change this) 50 and then 12 activations. The latent factors are set to 3 (you can change that, too). So we're first exploding our initially 14 variables to 50 activations, then condensing it to 12, then to 3. From these 3 latent factors we then sample to recreate the original 14 values. We do that by inflating the 3 latent factors back to 12, then 50 and finally 14 activations (we decode the latent factors so to speak). With this reconstructed batch (recon_batch) we compare it with the original batch, computate our loss and adjust the weights and biases via our gradient (our optimizer here will be Adam). "
435 | ],
436 | "cell_type": "markdown",
437 | "metadata": {}
438 | },
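{
"source": [
 "To make that dimension flow concrete, here is a quick shape check (added for illustration, not part of the original run). It builds a fresh, untrained instance of the class above, so the values are meaningless; only the shapes matter."
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"shape_check = Autoencoder(D_in=14).eval()              # eval mode so BatchNorm uses its running stats\n",
"with torch.no_grad():\n",
"    x_demo = torch.randn(2, 14)                        # two dummy rows with 14 columns\n",
"    mu_demo, logvar_demo = shape_check.encode(x_demo)  # both have shape (2, 3): the latent factors\n",
"    recon_demo = shape_check.decode(mu_demo)           # back to shape (2, 14)\n",
"mu_demo.shape, recon_demo.shape"
]
},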
439 | {
440 | "cell_type": "code",
441 | "execution_count": 232,
442 | "metadata": {},
443 | "outputs": [],
444 | "source": [
445 | "D_in = data_set.x.shape[1]\n",
446 | "H = 50\n",
447 | "H2 = 12\n",
448 | "model = Autoencoder(D_in, H, H2).to(device)\n",
449 | "optimizer = optim.Adam(model.parameters(), lr=1e-3)"
450 | ]
451 | },
452 | {
453 | "cell_type": "code",
454 | "execution_count": 233,
455 | "metadata": {},
456 | "outputs": [],
457 | "source": [
458 | "loss_mse = customLoss()"
459 | ]
460 | },
461 | {
462 | "source": [
463 | "## Train Model"
464 | ],
465 | "cell_type": "markdown",
466 | "metadata": {}
467 | },
468 | {
469 | "cell_type": "code",
470 | "execution_count": 234,
471 | "metadata": {},
472 | "outputs": [],
473 | "source": [
474 | "epochs = 1500\n",
475 | "log_interval = 50\n",
476 | "val_losses = []\n",
477 | "train_losses = []\n",
478 | "test_losses = []"
479 | ]
480 | },
481 | {
482 | "cell_type": "code",
483 | "execution_count": 235,
484 | "metadata": {},
485 | "outputs": [],
486 | "source": [
487 | "def train(epoch):\n",
488 | " model.train()\n",
489 | " train_loss = 0\n",
490 | " for batch_idx, data in enumerate(trainloader):\n",
491 | " data = data.to(device)\n",
492 | " optimizer.zero_grad()\n",
493 | " recon_batch, mu, logvar = model(data)\n",
494 | " loss = loss_mse(recon_batch, data, mu, logvar)\n",
495 | " loss.backward()\n",
496 | " train_loss += loss.item()\n",
497 | " optimizer.step()\n",
498 | " if epoch % 200 == 0: \n",
499 | " print('====> Epoch: {} Average training loss: {:.4f}'.format(\n",
500 | " epoch, train_loss / len(trainloader.dataset)))\n",
501 | " train_losses.append(train_loss / len(trainloader.dataset))"
502 | ]
503 | },
504 | {
505 | "cell_type": "code",
506 | "execution_count": 236,
507 | "metadata": {},
508 | "outputs": [],
509 | "source": [
510 | "def test(epoch):\n",
511 | " with torch.no_grad():\n",
512 | " test_loss = 0\n",
513 | " for batch_idx, data in enumerate(testloader):\n",
514 | " data = data.to(device)\n",
515 | " optimizer.zero_grad()\n",
516 | " recon_batch, mu, logvar = model(data)\n",
517 | " loss = loss_mse(recon_batch, data, mu, logvar)\n",
518 | " test_loss += loss.item()\n",
519 | " if epoch % 200 == 0: \n",
520 | " print('====> Epoch: {} Average test loss: {:.4f}'.format(\n",
521 | " epoch, test_loss / len(testloader.dataset)))\n",
522 | " test_losses.append(test_loss / len(testloader.dataset))"
523 | ]
524 | },
525 | {
526 | "cell_type": "code",
527 | "execution_count": 237,
528 | "metadata": {},
529 | "outputs": [
530 | {
531 | "output_type": "stream",
532 | "name": "stdout",
533 | "text": [
534 | "====> Epoch: 200 Average training loss: 12.3501\n",
535 | "====> Epoch: 200 Average test loss: 11.7777\n",
536 | "====> Epoch: 400 Average training loss: 10.1168\n",
537 | "====> Epoch: 400 Average test loss: 8.9987\n",
538 | "====> Epoch: 600 Average training loss: 9.2956\n",
539 | "====> Epoch: 600 Average test loss: 9.3548\n",
540 | "====> Epoch: 800 Average training loss: 8.9570\n",
541 | "====> Epoch: 800 Average test loss: 8.9647\n",
542 | "====> Epoch: 1000 Average training loss: 8.6688\n",
543 | "====> Epoch: 1000 Average test loss: 8.5866\n",
544 | "====> Epoch: 1200 Average training loss: 8.3341\n",
545 | "====> Epoch: 1200 Average test loss: 8.8371\n",
546 | "====> Epoch: 1400 Average training loss: 8.4063\n",
547 | "====> Epoch: 1400 Average test loss: 8.7891\n"
548 | ]
549 | }
550 | ],
551 | "source": [
552 | "for epoch in range(1, epochs + 1):\n",
553 | " train(epoch)\n",
554 | " test(epoch)"
555 | ]
556 | },
557 | {
558 | "source": [
559 | "We we're able to reduce the training and test loss but quite a bit, let's have a look at how the fake results actually look like vs the real results:"
560 | ],
561 | "cell_type": "markdown",
562 | "metadata": {}
563 | },
564 | {
565 | "cell_type": "code",
566 | "execution_count": 238,
567 | "metadata": {},
568 | "outputs": [],
569 | "source": [
570 | "with torch.no_grad():\n",
571 | " for batch_idx, data in enumerate(testloader):\n",
572 | " data = data.to(device)\n",
573 | " optimizer.zero_grad()\n",
574 | " recon_batch, mu, logvar = model(data)"
575 | ]
576 | },
577 | {
578 | "cell_type": "code",
579 | "execution_count": 243,
580 | "metadata": {},
581 | "outputs": [],
582 | "source": [
583 | "scaler = trainloader.dataset.standardizer\n",
584 | "recon_row = scaler.inverse_transform(recon_batch[0].cpu().numpy())\n",
585 | "real_row = scaler.inverse_transform(testloader.dataset.x[0].cpu().numpy())"
586 | ]
587 | },
588 | {
589 | "cell_type": "code",
590 | "execution_count": 246,
591 | "metadata": {},
592 | "outputs": [
593 | {
594 | "output_type": "execute_result",
595 | "data": {
596 | "text/plain": [
597 | " Wine Alcohol Malic.acid Ash Acl Mg Phenols \\\n",
598 | "0 1.002792 13.535107 2.010303 2.557292 18.198132 112.606842 2.737524 \n",
599 | "1 1.000000 13.640000 3.100000 2.560000 15.200000 116.000000 2.700000 \n",
600 | "\n",
601 | " Flavanoids Nonflavanoid.phenols Proanth Color.int Hue OD \\\n",
602 | "0 2.807587 0.320866 1.738254 4.899318 1.078039 3.187276 \n",
603 | "1 3.030000 0.170000 1.660000 5.100000 0.960000 3.360000 \n",
604 | "\n",
605 | " Proline \n",
606 | "0 1013.391479 \n",
607 | "1 845.000000 "
608 | ],
609 |      "text/html": "HTML table rendering omitted (same data as the text/plain output above)"
610 | },
611 | "metadata": {},
612 | "execution_count": 246
613 | }
614 | ],
615 | "source": [
616 | "df = pd.DataFrame(np.stack((recon_row, real_row)), columns = cols)\n",
617 | "df"
618 | ]
619 | },
620 | {
621 | "source": [
622 | "Not to bad right (the first row is the reconstructed row, the second one the real row from the data)? However, what we want is to built this row not with the real input so to speak, since right now we were giving the model the complete rows with their 14 columns, condensed it to 3 input parameters, just to blow it up again to the corresponding 14 columns. What I want to do is to create these 14 rows by giving the model 3 latent factors as input. Let's have a look at these latent variables. "
623 | ],
624 | "cell_type": "markdown",
625 | "metadata": {}
626 | },
627 | {
628 | "cell_type": "code",
629 | "execution_count": 255,
630 | "metadata": {},
631 | "outputs": [],
632 | "source": [
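"# sigma = exp(logvar / 2), i.e. convert the learned log-variance back into a standard deviation\n",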
633 | "sigma = torch.exp(logvar/2)"
634 | ]
635 | },
636 | {
637 | "cell_type": "code",
638 | "execution_count": 256,
639 | "metadata": {},
640 | "outputs": [
641 | {
642 | "output_type": "execute_result",
643 | "data": {
644 | "text/plain": [
645 | "(tensor([-0.9960, -0.8502, -0.0043]), tensor([0.2555, 0.4801, 0.9888]))"
646 | ]
647 | },
648 | "metadata": {},
649 | "execution_count": 256
650 | }
651 | ],
652 | "source": [
653 | "mu[1], sigma[1]"
654 | ]
655 | },
656 | {
657 | "source": [
658 | "Mu represents the mean for each of our latent factor values, logvar the log of the standard deviation. Each of these have a distribution by itself. We have 54 cases in our test data, so we have 3x54 different mu and logvar. We can have a look at the distribution of each of the 3 latent variables: "
659 | ],
660 | "cell_type": "markdown",
661 | "metadata": {}
662 | },
663 | {
664 | "cell_type": "code",
665 | "execution_count": 257,
666 | "metadata": {},
667 | "outputs": [
668 | {
669 | "output_type": "execute_result",
670 | "data": {
671 | "text/plain": [
672 | "(tensor([-0.0088, 0.0051, 0.0044]), tensor([0.4514, 0.3897, 0.9986]))"
673 | ]
674 | },
675 | "metadata": {},
676 | "execution_count": 257
677 | }
678 | ],
679 | "source": [
680 | "mu.mean(axis=0), sigma.mean(axis=0)"
681 | ]
682 | },
683 | {
684 | "source": [
685 | "All of the latent variables have a mean around zero, but the last latent factor has a wider standard deviation. So when we sample values from each of these latent variables, the last value will vary much more then the other two. I assume a normal distribution for all the latent factors."
686 | ],
687 | "cell_type": "markdown",
688 | "metadata": {}
689 | },
690 | {
691 | "cell_type": "code",
692 | "execution_count": 405,
693 | "metadata": {},
694 | "outputs": [],
695 | "source": [
696 | "# sample z from q\n",
697 | "no_samples = 20\n",
698 | "q = torch.distributions.Normal(mu.mean(axis=0), sigma.mean(axis=0))\n",
699 | "z = q.rsample(sample_shape=torch.Size([no_samples]))"
700 | ]
701 | },
702 | {
703 | "cell_type": "code",
704 | "execution_count": 406,
705 | "metadata": {},
706 | "outputs": [
707 | {
708 | "output_type": "execute_result",
709 | "data": {
710 | "text/plain": [
711 | "torch.Size([20, 3])"
712 | ]
713 | },
714 | "metadata": {},
715 | "execution_count": 406
716 | }
717 | ],
718 | "source": [
719 | "z.shape"
720 | ]
721 | },
722 | {
723 | "cell_type": "code",
724 | "execution_count": 446,
725 | "metadata": {},
726 | "outputs": [
727 | {
728 | "output_type": "execute_result",
729 | "data": {
730 | "text/plain": [
731 | "tensor([[ 0.5283, 0.4519, 0.6792],\n",
732 | " [ 0.3664, -0.5569, -0.1531],\n",
733 | " [-0.5802, 0.4394, 1.8406],\n",
734 | " [-1.0136, -0.4239, 0.4524],\n",
735 | " [-0.0605, 0.3913, 0.8030]])"
736 | ]
737 | },
738 | "metadata": {},
739 | "execution_count": 446
740 | }
741 | ],
742 | "source": [
743 | "z[:5]"
744 | ]
745 | },
746 | {
747 | "source": [
748 | "With these three latent factors we can now start and create fake data for our dataset and see how it looks like:"
749 | ],
750 | "cell_type": "markdown",
751 | "metadata": {}
752 | },
753 | {
754 | "cell_type": "code",
755 | "execution_count": 408,
756 | "metadata": {},
757 | "outputs": [],
758 | "source": [
759 | "with torch.no_grad():\n",
760 | " pred = model.decode(z).cpu().numpy()"
761 | ]
762 | },
763 | {
764 | "cell_type": "code",
765 | "execution_count": 409,
766 | "metadata": {},
767 | "outputs": [
768 | {
769 | "output_type": "execute_result",
770 | "data": {
771 | "text/plain": [
772 | "array([-0.24290268, -0.6087041 , -0.44325534, -0.7158908 , -0.15065292,\n",
773 | " -0.47845733, 0.26319185, 0.23732403, -0.22809544, 0.12187037,\n",
774 | " -0.8295655 , 0.44908378, 0.6173717 , -0.55648965], dtype=float32)"
775 | ]
776 | },
777 | "metadata": {},
778 | "execution_count": 409
779 | }
780 | ],
781 | "source": [
782 | "pred[1]"
783 | ]
784 | },
785 | {
786 | "source": [
787 | "## Create fake data from Autoencoder"
788 | ],
789 | "cell_type": "markdown",
790 | "metadata": {}
791 | },
792 | {
793 | "cell_type": "code",
794 | "execution_count": 420,
795 | "metadata": {},
796 | "outputs": [
797 | {
798 | "output_type": "execute_result",
799 | "data": {
800 | "text/plain": [
801 | "(20, 14)"
802 | ]
803 | },
804 | "metadata": {},
805 | "execution_count": 420
806 | }
807 | ],
808 | "source": [
809 | "fake_data = scaler.inverse_transform(pred)\n",
810 | "fake_data.shape"
811 | ]
812 | },
813 | {
814 | "cell_type": "code",
815 | "execution_count": 439,
816 | "metadata": {},
817 | "outputs": [
818 | {
819 | "output_type": "execute_result",
820 | "data": {
821 | "text/plain": [
822 | " Wine Alcohol Malic.acid Ash Acl Mg Phenols \\\n",
823 | "0 3 13.350755 3.817283 2.425754 21.229387 98.816788 1.682916 \n",
824 | "1 2 12.453159 1.916350 2.172731 18.977226 93.556114 2.444676 \n",
825 | "2 2 12.735057 2.404566 2.447556 20.400013 105.475235 1.937112 \n",
826 | "3 1 14.664644 1.517465 2.269279 12.428186 88.851791 3.354010 \n",
827 | "4 3 13.160161 3.359397 2.415784 21.050211 99.859154 1.662516 \n",
828 | "5 2 12.453159 1.916350 2.172731 18.977226 93.556114 2.444676 \n",
829 | "6 2 12.520310 2.522696 2.375254 20.435560 92.619812 1.838333 \n",
830 | "7 3 12.877177 2.746192 2.395865 20.154610 97.263092 1.744550 \n",
831 | "8 2 12.679532 2.344776 2.331834 19.901327 97.031586 1.857117 \n",
832 | "9 2 13.062141 2.719065 2.461590 19.947014 103.352890 2.070540 \n",
833 | "\n",
834 | " Flavanoids Nonflavanoid.phenols Proanth Color.int Hue OD \\\n",
835 | "0 0.910786 0.450081 1.245882 8.242197 0.667928 1.705379 \n",
836 | "1 2.246270 0.335432 1.663583 3.166457 1.063876 3.050176 \n",
837 | "2 1.657119 0.385740 1.452577 4.242754 0.928397 2.467263 \n",
838 | "3 3.997237 0.265253 2.586414 7.366968 1.275564 3.170231 \n",
839 | "4 0.929189 0.427978 1.135361 7.101127 0.708510 1.732820 \n",
840 | "5 2.246270 0.335432 1.663583 3.166457 1.063876 3.050176 \n",
841 | "6 1.361269 0.470815 1.221076 4.518130 0.906680 2.146883 \n",
842 | "7 1.187050 0.464942 1.160733 5.619783 0.836708 1.871472 \n",
843 | "8 1.495742 0.461352 1.239715 4.668478 0.934352 2.094139 \n",
844 | "9 1.566055 0.380154 1.293219 5.675068 0.852832 2.128047 \n",
845 | "\n",
846 | " Proline \n",
847 | "0 636.650818 \n",
848 | "1 568.385925 \n",
849 | "2 680.271545 \n",
850 | "3 1516.662720 \n",
851 | "4 640.412231 \n",
852 | "5 568.385925 \n",
853 | "6 583.079102 \n",
854 | "7 665.485718 \n",
855 | "8 680.778809 \n",
856 | "9 778.582825 "
857 | ]
859 | },
860 | "metadata": {},
861 | "execution_count": 439
862 | }
863 | ],
864 | "source": [
865 | "df_fake = pd.DataFrame(fake_data, columns = cols)\n",
866 | "df_fake['Wine'] = np.round(df_fake['Wine']).astype(int)\n",
867 | "df_fake['Wine'] = np.where(df_fake['Wine']<1, 1, df_fake['Wine'])\n",
868 | "df_fake.head(10)"
869 | ]
870 | },
871 | {
872 | "source": [
873 | "For comparison the real data:"
874 | ],
875 | "cell_type": "markdown",
876 | "metadata": {}
877 | },
878 | {
879 | "cell_type": "code",
880 | "execution_count": 444,
881 | "metadata": {},
882 | "outputs": [
883 | {
884 | "output_type": "execute_result",
885 | "data": {
886 | "text/plain": [
887 | " Wine Alcohol Malic.acid Ash Acl Mg Phenols Flavanoids \\\n",
888 | "1 1 13.20 1.78 2.14 11.2 100 2.65 2.76 \n",
889 | "35 1 13.48 1.81 2.41 20.5 100 2.70 2.98 \n",
890 | "114 2 12.08 1.39 2.50 22.5 84 2.56 2.29 \n",
891 | "149 3 13.08 3.90 2.36 21.5 113 1.41 1.39 \n",
892 | "158 3 14.34 1.68 2.70 25.0 98 2.80 1.31 \n",
893 | "9 1 13.86 1.35 2.27 16.0 98 2.98 3.15 \n",
894 | "90 2 12.08 1.83 2.32 18.5 81 1.60 1.50 \n",
895 | "47 1 13.90 1.68 2.12 16.0 101 3.10 3.39 \n",
896 | "10 1 14.10 2.16 2.30 18.0 105 2.95 3.32 \n",
897 | "31 1 13.58 1.66 2.36 19.1 106 2.86 3.19 \n",
898 | "\n",
899 | " Nonflavanoid.phenols Proanth Color.int Hue OD Proline \n",
900 | "1 0.26 1.28 4.38 1.05 3.40 1050 \n",
901 | "35 0.26 1.86 5.10 1.04 3.47 920 \n",
902 | "114 0.43 1.04 2.90 0.93 3.19 385 \n",
903 | "149 0.34 1.14 9.40 0.57 1.33 550 \n",
904 | "158 0.53 2.70 13.00 0.57 1.96 660 \n",
905 | "9 0.22 1.85 7.22 1.01 3.55 1045 \n",
906 | "90 0.52 1.64 2.40 1.08 2.27 480 \n",
907 | "47 0.21 2.14 6.10 0.91 3.33 985 \n",
908 | "10 0.22 2.38 5.75 1.25 3.17 1510 \n",
909 | "31 0.22 1.95 6.90 1.09 2.88 1515 "
910 | ]
912 | },
913 | "metadata": {},
914 | "execution_count": 444
915 | }
916 | ],
917 | "source": [
918 | "df_base.sample(10)"
919 | ]
920 | },
921 | {
922 | "source": [
923 | "## Compare variables grouped by Wine"
924 | ],
925 | "cell_type": "markdown",
926 | "metadata": {}
927 | },
928 | {
929 | "cell_type": "code",
930 | "execution_count": 443,
931 | "metadata": {},
932 | "outputs": [
933 | {
934 | "output_type": "execute_result",
935 | "data": {
936 | "text/plain": [
937 | " Alcohol Malic.acid Ash Acl Mg Phenols \\\n",
938 | "Wine \n",
939 | "1 13.744746 2.010678 2.455593 17.037288 106.338983 2.840169 \n",
940 | "2 12.278732 1.932676 2.244789 20.238028 94.549296 2.258873 \n",
941 | "3 13.153750 3.333750 2.437083 21.416667 99.312500 1.678750 \n",
942 | "\n",
943 | " Flavanoids Nonflavanoid.phenols Proanth Color.int Hue \\\n",
944 | "Wine \n",
945 | "1 2.982373 0.290000 1.899322 5.528305 1.062034 \n",
946 | "2 2.080845 0.363662 1.630282 3.086620 1.056282 \n",
947 | "3 0.781458 0.447500 1.153542 7.396250 0.682708 \n",
948 | "\n",
949 | " OD Proline \n",
950 | "Wine \n",
951 | "1 3.157797 1115.711864 \n",
952 | "2 2.785352 519.507042 \n",
953 | "3 1.683542 629.895833 "
954 | ]
956 | },
957 | "metadata": {},
958 | "execution_count": 443
959 | }
960 | ],
961 | "source": [
962 | "df_base.groupby('Wine').mean()"
963 | ]
964 | },
965 | {
966 | "cell_type": "code",
967 | "execution_count": 445,
968 | "metadata": {},
969 | "outputs": [
970 | {
971 | "output_type": "execute_result",
972 | "data": {
973 | "text/plain": [
974 | " Alcohol Malic.acid Ash Acl Mg Phenols \\\n",
975 | "Wine \n",
976 | "1 13.812141 1.814212 2.482638 17.172688 107.468864 3.062387 \n",
977 | "2 12.560544 2.157595 2.301805 19.696327 99.324005 2.254415 \n",
978 | "3 13.170316 3.413856 2.416369 20.929930 99.028229 1.683604 \n",
979 | "\n",
980 | " Flavanoids Nonflavanoid.phenols Proanth Color.int Hue \\\n",
981 | "Wine \n",
982 | "1 3.344664 0.259955 2.162966 5.331643 1.147217 \n",
983 | "2 1.995140 0.366076 1.575015 3.791955 1.000527 \n",
984 | "3 0.964315 0.443444 1.176529 7.288512 0.718357 \n",
985 | "\n",
986 | " OD Proline \n",
987 | "Wine \n",
988 | "1 3.280716 1148.031372 \n",
989 | "2 2.741598 629.895203 \n",
990 | "3 1.745200 644.870056 "
991 | ]
993 | },
994 | "metadata": {},
995 | "execution_count": 445
996 | }
997 | ],
998 | "source": [
999 | "df_fake.groupby('Wine').mean()"
1000 | ]
1001 | },
1002 | {
1003 | "source": [
1004 | "That looks pretty convincing if you ask me. \n",
1005 | "\n",
1006 | "To sum up, we've built a variational autoencoder, which we trained on our trainingset. We checked whether our loss kept on improving based on the testset, which the autoencoder never saw for generating fake data. We then calculated the mean and standard deviation from our latent factors given the test data. We've then sampled from this distribution to feed it back into our decoder to create some fake data. With this approach I am now able to create as much fake data derived from the underlying distribution as a want. And I think the results look promising. \n",
1007 | "\n",
1008 | "You can take this approach to for example create data from under-represented in highly skewed datasets instead of just weighting them higher. The re-weighting approach might cause the algorithm to find relations where there are none, only because a few then overrepresented data points share this relation by random. With the shown approach, the learned distribution would take into account the high variance these features have and therefore will hopefully help the algorithm to not draw these false conclusions.\n",
1009 | "\n",
1010 | "Stay tuned for the next blogpost, where I will show the shown approach in exactly this use case."
1011 | ],
1012 | "cell_type": "markdown",
1013 | "metadata": {}
1014 | },
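{
"source": [
"As a quick preview, the cell below is a minimal sketch of that idea and was not run above: it assumes a standardized tensor `x_minority` that holds only the rows of an under-represented class and reuses `model`, `scaler` and `cols` from this notebook to sample new rows for that class."
],
"cell_type": "markdown",
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: oversample an under-represented class with the trained VAE.\n",
"# `x_minority` is assumed to be a standardized float tensor containing only the minority-class rows.\n",
"with torch.no_grad():\n",
"    _, mu_min, logvar_min = model(x_minority)\n",
"    sigma_min = torch.exp(logvar_min / 2)\n",
"    # class-conditional latent distribution, built the same way as q above\n",
"    q_min = torch.distributions.Normal(mu_min.mean(axis=0), sigma_min.mean(axis=0))\n",
"    z_min = q_min.rsample(sample_shape=torch.Size([500]))\n",
"    synthetic = scaler.inverse_transform(model.decode(z_min).cpu().numpy())\n",
"pd.DataFrame(synthetic, columns=cols).head()"
]
},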
1015 | {
1016 | "cell_type": "code",
1017 | "execution_count": null,
1018 | "metadata": {},
1019 | "outputs": [],
1020 | "source": []
1021 | }
1022 | ]
1023 | }
--------------------------------------------------------------------------------
/Create_Autoencoder_Model_Basemodel_3Embeddings.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Variational Autoencoder on Tabular Data"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "I use [wine dataset](https://archive.ics.uci.edu/ml/datasets/wine) to show how Variational Autoencoder (VAE) with PyTorch on tabular data works. I use the VAE to reduce the dimensionality of dataset, in this case don to 3 Variables (embeddings). I then plot the embeddings in a 3D graph to show how VAE is similar to a PCA but works in a non-linear way. "
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "# Imports"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": 1,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "import torch\n",
31 | "import torch.nn as nn\n",
32 | "import torch.nn.functional as F\n",
33 | "from torch import nn, optim\n",
34 | "from torch.autograd import Variable\n",
35 | "\n",
36 | "import pandas as pd\n",
37 | "import numpy as np\n",
38 | "from sklearn import preprocessing"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": 2,
44 | "metadata": {},
45 | "outputs": [
46 | {
47 | "data": {
48 | "text/plain": [
49 | "device(type='cpu')"
50 | ]
51 | },
52 | "execution_count": 2,
53 | "metadata": {},
54 | "output_type": "execute_result"
55 | }
56 | ],
57 | "source": [
58 | "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
59 | "device"
60 | ]
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "metadata": {},
65 | "source": [
66 | "# Define Path to Dataset"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": 3,
72 | "metadata": {},
73 | "outputs": [],
74 | "source": [
75 | "DATA_PATH = 'Data/wine.csv'"
76 | ]
77 | },
78 | {
79 | "cell_type": "markdown",
80 | "metadata": {},
81 | "source": [
82 | "# Define Functions"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": 4,
88 | "metadata": {},
89 | "outputs": [],
90 | "source": [
91 | "def load_data(path):\n",
92 | " # read in from csv\n",
93 | " df = pd.read_csv(path, sep=',')\n",
94 | " # replace nan with -99\n",
95 | " df = df.fillna(-99)\n",
96 | " df_base = df.iloc[:, 1:]\n",
97 | " df_wine = df.iloc[:,0].values\n",
98 | " x = df_base.values.reshape(-1, df_base.shape[1]).astype('float32')\n",
99 | " # stadardize values\n",
100 | " standardizer = preprocessing.StandardScaler()\n",
101 | " x_train = standardizer.fit_transform(x)\n",
102 | " x_train = torch.from_numpy(x_train).to(device)\n",
103 | " return x_train, standardizer, df_wine"
104 | ]
105 | },
106 | {
107 | "cell_type": "markdown",
108 | "metadata": {},
109 | "source": [
110 | "# Build DataLoader"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": 5,
116 | "metadata": {},
117 | "outputs": [],
118 | "source": [
119 | "from torch.utils.data import Dataset, DataLoader\n",
120 | "class DataBuilder(Dataset):\n",
121 | " def __init__(self, path):\n",
122 | " self.x, self.standardizer, self.wine = load_data(DATA_PATH)\n",
123 | " self.len=self.x.shape[0]\n",
124 | " def __getitem__(self,index): \n",
125 | " return self.x[index]\n",
126 | " def __len__(self):\n",
127 | " return self.len"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": 6,
133 | "metadata": {},
134 | "outputs": [],
135 | "source": [
136 | "data_set=DataBuilder(DATA_PATH)\n",
137 | "trainloader=DataLoader(dataset=data_set,batch_size=1024)"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": 7,
143 | "metadata": {},
144 | "outputs": [
145 | {
146 | "data": {
147 | "text/plain": [
148 | "torch.Tensor"
149 | ]
150 | },
151 | "execution_count": 7,
152 | "metadata": {},
153 | "output_type": "execute_result"
154 | }
155 | ],
156 | "source": [
157 | "type(trainloader.dataset.x)"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": 8,
163 | "metadata": {},
164 | "outputs": [
165 | {
166 | "data": {
167 | "text/plain": [
168 | "tensor([[ 1.5186, -0.5622, 0.2321, ..., 0.3622, 1.8479, 1.0130],\n",
169 | " [ 0.2463, -0.4994, -0.8280, ..., 0.4061, 1.1134, 0.9652],\n",
170 | " [ 0.1969, 0.0212, 1.1093, ..., 0.3183, 0.7886, 1.3951],\n",
171 | " ...,\n",
172 | " [ 0.3328, 1.7447, -0.3894, ..., -1.6121, -1.4854, 0.2806],\n",
173 | " [ 0.2092, 0.2277, 0.0127, ..., -1.5683, -1.4007, 0.2965],\n",
174 | " [ 1.3951, 1.5832, 1.3652, ..., -1.5244, -1.4289, -0.5952]])"
175 | ]
176 | },
177 | "execution_count": 8,
178 | "metadata": {},
179 | "output_type": "execute_result"
180 | }
181 | ],
182 | "source": [
183 | "data_set.x"
184 | ]
185 | },
186 | {
187 | "cell_type": "markdown",
188 | "metadata": {},
189 | "source": [
190 | "# Build Model and train it"
191 | ]
192 | },
193 | {
194 | "cell_type": "code",
195 | "execution_count": 9,
196 | "metadata": {},
197 | "outputs": [],
198 | "source": [
199 | "class Autoencoder(nn.Module):\n",
200 | " def __init__(self,D_in,H=50,H2=12,latent_dim=3):\n",
201 | " \n",
202 | " #Encoder\n",
203 | " super(Autoencoder,self).__init__()\n",
204 | " self.linear1=nn.Linear(D_in,H)\n",
205 | " self.lin_bn1 = nn.BatchNorm1d(num_features=H)\n",
206 | " self.linear2=nn.Linear(H,H2)\n",
207 | " self.lin_bn2 = nn.BatchNorm1d(num_features=H2)\n",
208 | " self.linear3=nn.Linear(H2,H2)\n",
209 | " self.lin_bn3 = nn.BatchNorm1d(num_features=H2)\n",
210 | " \n",
211 | "# # Latent vectors mu and sigma\n",
212 | " self.fc1 = nn.Linear(H2, latent_dim)\n",
213 | "# self.bn1 = nn.BatchNorm1d(num_features=latent_dim)\n",
214 | " self.fc21 = nn.Linear(latent_dim, latent_dim)\n",
215 | " self.fc22 = nn.Linear(latent_dim, latent_dim)\n",
216 | "\n",
217 | "# # Sampling vector\n",
218 | " self.fc3 = nn.Linear(latent_dim, latent_dim)\n",
219 | "# self.fc_bn3 = nn.BatchNorm1d(latent_dim)\n",
220 | " self.fc4 = nn.Linear(latent_dim, H2)\n",
221 | "# self.fc_bn4 = nn.BatchNorm1d(H2)\n",
222 | " \n",
223 | "# # Decoder\n",
224 | " self.linear4=nn.Linear(H2,H2)\n",
225 | " self.lin_bn4 = nn.BatchNorm1d(num_features=H2)\n",
226 | " self.linear5=nn.Linear(H2,H)\n",
227 | " self.lin_bn5 = nn.BatchNorm1d(num_features=H)\n",
228 | " self.linear6=nn.Linear(H,D_in)\n",
229 | " self.lin_bn6 = nn.BatchNorm1d(num_features=D_in)\n",
230 | " \n",
231 | " self.relu = nn.ReLU()\n",
232 | " \n",
233 | " def encode(self, x):\n",
234 | " lin1 = self.relu(self.lin_bn1(self.linear1(x)))\n",
235 | " lin2 = self.relu(self.lin_bn2(self.linear2(lin1)))\n",
236 | " lin3 = self.relu(self.lin_bn3(self.linear3(lin2)))\n",
237 | "\n",
238 | " fc1 = F.relu(self.fc1(lin3))\n",
239 | "\n",
240 | " r1 = self.fc21(fc1)\n",
241 | " r2 = self.fc22(fc1)\n",
242 | " \n",
243 | " return r1, r2\n",
244 | " \n",
245 | " def reparameterize(self, mu, logvar):\n",
246 | " if self.training:\n",
247 | " std = logvar.mul(0.5).exp_()\n",
248 | " eps = Variable(std.data.new(std.size()).normal_())\n",
249 | " return eps.mul(std).add_(mu)\n",
250 | " else:\n",
251 | " return mu\n",
252 | " \n",
253 | " def decode(self, z):\n",
254 | " fc3 = self.relu(self.fc3(z))\n",
255 | " fc4 = self.relu(self.fc4(fc3))#.view(128, -1)\n",
256 | "\n",
257 | " lin4 = self.relu(self.lin_bn4(self.linear4(fc4)))\n",
258 | " lin5 = self.relu(self.lin_bn5(self.linear5(lin4)))\n",
259 | " return self.lin_bn6(self.linear6(lin5))\n",
260 | "\n",
261 | "\n",
262 | " \n",
263 | " def forward(self, x):\n",
264 | " mu, logvar = self.encode(x)\n",
265 | " z = self.reparameterize(mu, logvar)\n",
266 | " # self.decode(z) ist später recon_batch, mu ist mu und logvar ist logvar\n",
267 | " return self.decode(z), mu, logvar"
268 | ]
269 | },
270 | {
271 | "cell_type": "code",
272 | "execution_count": 10,
273 | "metadata": {},
274 | "outputs": [],
275 | "source": [
276 | "class customLoss(nn.Module):\n",
277 | " def __init__(self):\n",
278 | " super(customLoss, self).__init__()\n",
279 | " self.mse_loss = nn.MSELoss(reduction=\"sum\")\n",
280 | " \n",
281 | " # x_recon ist der im forward im Model erstellte recon_batch, x ist der originale x Batch, mu ist mu und logvar ist logvar \n",
282 | " def forward(self, x_recon, x, mu, logvar):\n",
283 | " loss_MSE = self.mse_loss(x_recon, x)\n",
284 | " loss_KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n",
285 | "\n",
286 | " return loss_MSE + loss_KLD"
287 | ]
288 | },
289 | {
290 | "cell_type": "code",
291 | "execution_count": 11,
292 | "metadata": {},
293 | "outputs": [],
294 | "source": [
295 | "# takes in a module and applies the specified weight initialization\n",
296 | "def weights_init_uniform_rule(m):\n",
297 | " classname = m.__class__.__name__\n",
298 | " # for every Linear layer in a model..\n",
299 | " if classname.find('Linear') != -1:\n",
300 | " # get the number of the inputs\n",
301 | " n = m.in_features\n",
302 | " y = 1.0/np.sqrt(n)\n",
303 | " m.weight.data.uniform_(-y, y)\n",
304 | " m.bias.data.fill_(0)"
305 | ]
306 | },
307 | {
308 | "cell_type": "code",
309 | "execution_count": 12,
310 | "metadata": {},
311 | "outputs": [],
312 | "source": [
313 | "D_in = data_set.x.shape[1]\n",
314 | "H = 50\n",
315 | "H2 = 12\n",
316 | "model = Autoencoder(D_in, H, H2).to(device)\n",
317 | "#model.apply(weights_init_uniform_rule)\n",
318 | "#sae.fc4.register_forward_hook(get_activation('fc4'))\n",
319 | "optimizer = optim.Adam(model.parameters(), lr=1e-3)"
320 | ]
321 | },
322 | {
323 | "cell_type": "code",
324 | "execution_count": 13,
325 | "metadata": {},
326 | "outputs": [],
327 | "source": [
328 | "loss_mse = customLoss()"
329 | ]
330 | },
331 | {
332 | "cell_type": "markdown",
333 | "metadata": {},
334 | "source": [
335 | "# Train"
336 | ]
337 | },
338 | {
339 | "cell_type": "code",
340 | "execution_count": 14,
341 | "metadata": {},
342 | "outputs": [],
343 | "source": [
344 | "epochs = 2000\n",
345 | "log_interval = 50\n",
346 | "val_losses = []\n",
347 | "train_losses = []"
348 | ]
349 | },
350 | {
351 | "cell_type": "code",
352 | "execution_count": 15,
353 | "metadata": {},
354 | "outputs": [],
355 | "source": [
356 | "def train(epoch):\n",
357 | " model.train()\n",
358 | " train_loss = 0\n",
359 | " for batch_idx, data in enumerate(trainloader):\n",
360 | " data = data.to(device)\n",
361 | " optimizer.zero_grad()\n",
362 | " recon_batch, mu, logvar = model(data)\n",
363 | " loss = loss_mse(recon_batch, data, mu, logvar)\n",
364 | " loss.backward()\n",
365 | " train_loss += loss.item()\n",
366 | " optimizer.step()\n",
367 | "# if batch_idx % log_interval == 0:\n",
368 | "# print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n",
369 | "# epoch, batch_idx * len(data), len(trainloader.dataset),\n",
370 | "# 100. * batch_idx / len(trainloader),\n",
371 | "# loss.item() / len(data)))\n",
372 | " if epoch % 200 == 0: \n",
373 | " print('====> Epoch: {} Average loss: {:.4f}'.format(\n",
374 | " epoch, train_loss / len(trainloader.dataset)))\n",
375 | " train_losses.append(train_loss / len(trainloader.dataset))"
376 | ]
377 | },
378 | {
379 | "cell_type": "code",
380 | "execution_count": 16,
381 | "metadata": {
382 | "scrolled": true
383 | },
384 | "outputs": [
385 | {
386 | "name": "stdout",
387 | "output_type": "stream",
388 | "text": [
389 | "====> Epoch: 200 Average loss: 11.2040\n",
390 | "====> Epoch: 400 Average loss: 9.4026\n",
391 | "====> Epoch: 600 Average loss: 8.1786\n",
392 | "====> Epoch: 800 Average loss: 7.7224\n",
393 | "====> Epoch: 1000 Average loss: 7.6587\n",
394 | "====> Epoch: 1200 Average loss: 7.4626\n",
395 | "====> Epoch: 1400 Average loss: 7.4643\n",
396 | "====> Epoch: 1600 Average loss: 7.3207\n",
397 | "====> Epoch: 1800 Average loss: 7.0685\n",
398 | "====> Epoch: 2000 Average loss: 7.2222\n"
399 | ]
400 | }
401 | ],
402 | "source": [
403 | "for epoch in range(1, epochs + 1):\n",
404 | " train(epoch)"
405 | ]
406 | },
407 | {
408 | "cell_type": "markdown",
409 | "metadata": {},
410 | "source": [
411 | "# Evaluate"
412 | ]
413 | },
414 | {
415 | "cell_type": "code",
416 | "execution_count": 17,
417 | "metadata": {},
418 | "outputs": [],
419 | "source": [
420 | "standardizer = trainloader.dataset.standardizer"
421 | ]
422 | },
423 | {
424 | "cell_type": "code",
425 | "execution_count": 18,
426 | "metadata": {},
427 | "outputs": [],
428 | "source": [
429 | "model.eval()\n",
430 | "test_loss = 0\n",
431 | "# no_grad() bedeutet wir nehmen die vorher berechneten Gewichte und erneuern sie nicht\n",
432 | "with torch.no_grad():\n",
433 | " for i, data in enumerate(trainloader):\n",
434 | " data = data.to(device)\n",
435 | " recon_batch, mu, logvar = model(data)"
436 | ]
437 | },
438 | {
439 | "cell_type": "code",
440 | "execution_count": 19,
441 | "metadata": {},
442 | "outputs": [
443 | {
444 | "data": {
445 | "text/plain": [
446 | "array([1.34402313e+01, 1.96345377e+00, 2.62067842e+00, 1.87440109e+01,\n",
447 | " 1.07427719e+02, 2.63568044e+00, 2.69742918e+00, 3.28377843e-01,\n",
448 | " 1.67999256e+00, 4.58271646e+00, 1.10575414e+00, 3.02566600e+00,\n",
449 | " 1.01869727e+03], dtype=float32)"
450 | ]
451 | },
452 | "execution_count": 19,
453 | "metadata": {},
454 | "output_type": "execute_result"
455 | }
456 | ],
457 | "source": [
458 | "standardizer.inverse_transform(recon_batch[65].cpu().numpy())"
459 | ]
460 | },
461 | {
462 | "cell_type": "code",
463 | "execution_count": 20,
464 | "metadata": {},
465 | "outputs": [
466 | {
467 | "data": {
468 | "text/plain": [
469 | "array([1.237e+01, 1.210e+00, 2.560e+00, 1.810e+01, 9.800e+01, 2.420e+00,\n",
470 | " 2.650e+00, 3.700e-01, 2.080e+00, 4.600e+00, 1.190e+00, 2.300e+00,\n",
471 | " 6.780e+02], dtype=float32)"
472 | ]
473 | },
474 | "execution_count": 20,
475 | "metadata": {},
476 | "output_type": "execute_result"
477 | }
478 | ],
479 | "source": [
480 | "standardizer.inverse_transform(data[65].cpu().numpy())"
481 | ]
482 | },
483 | {
484 | "cell_type": "markdown",
485 | "metadata": {},
486 | "source": [
487 | "# Get Embeddings"
488 | ]
489 | },
490 | {
491 | "cell_type": "code",
492 | "execution_count": 21,
493 | "metadata": {},
494 | "outputs": [],
495 | "source": [
496 | "mu_output = []\n",
497 | "logvar_output = []\n",
498 | "\n",
499 | "with torch.no_grad():\n",
500 | " for i, (data) in enumerate(trainloader):\n",
501 | " data = data.to(device)\n",
502 | " optimizer.zero_grad()\n",
503 | " recon_batch, mu, logvar = model(data)\n",
504 | "\n",
505 | " \n",
506 | " mu_tensor = mu \n",
507 | " mu_output.append(mu_tensor)\n",
508 | " mu_result = torch.cat(mu_output, dim=0)\n",
509 | "\n",
510 | " logvar_tensor = logvar \n",
511 | " logvar_output.append(logvar_tensor)\n",
512 | " logvar_result = torch.cat(logvar_output, dim=0)"
513 | ]
514 | },
515 | {
516 | "cell_type": "code",
517 | "execution_count": 22,
518 | "metadata": {},
519 | "outputs": [
520 | {
521 | "data": {
522 | "text/plain": [
523 | "torch.Size([178, 3])"
524 | ]
525 | },
526 | "execution_count": 22,
527 | "metadata": {},
528 | "output_type": "execute_result"
529 | }
530 | ],
531 | "source": [
532 | "mu_result.shape"
533 | ]
534 | },
535 | {
536 | "cell_type": "code",
537 | "execution_count": 23,
538 | "metadata": {},
539 | "outputs": [
540 | {
541 | "data": {
542 | "text/plain": [
543 | "tensor([[-0.0331, 1.1830, -0.0299],\n",
544 | " [-0.0395, 1.1291, -0.0380],\n",
545 | " [ 0.0049, 1.6027, 0.0214],\n",
546 | " [ 0.0227, 0.6376, 0.0077]])"
547 | ]
548 | },
549 | "execution_count": 23,
550 | "metadata": {},
551 | "output_type": "execute_result"
552 | }
553 | ],
554 | "source": [
555 | "mu_result[1:5,:]"
556 | ]
557 | },
558 | {
559 | "cell_type": "markdown",
560 | "metadata": {},
561 | "source": [
562 | "# Plot Embeddings"
563 | ]
564 | },
565 | {
566 | "cell_type": "code",
567 | "execution_count": 24,
568 | "metadata": {},
569 | "outputs": [],
570 | "source": [
571 | "from mpl_toolkits import mplot3d\n",
572 | "\n",
573 | "%matplotlib inline\n",
574 | "import numpy as np\n",
575 | "import matplotlib.pyplot as plt"
576 | ]
577 | },
578 | {
579 | "cell_type": "code",
580 | "execution_count": 25,
581 | "metadata": {},
582 | "outputs": [
583 | {
584 | "data": {
585 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAWQAAADuCAYAAAAOR30qAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzsvXl4JGd9LvrW0t3qllrrSCONNNql2ffReMYQZzAeBhwYTOwQ4F5w4iTH5IR74MKDM8fYTswxj81hu+SBAzkX54kJ8HjJch0g8UawIQz2aOyxZ/EsUre2bqm19b7Wev/QfDXVraruql40klyvn3lm3Krl61LVW7/v972/90fJsgwLFixYsHDjQd/oAViwYMGChSVYhGzBggULqwQWIVuwYMHCKoFFyBYsWLCwSmARsgULFiysEliEbMGCBQurBBYhW7BgwcIqgUXIFixYsLBKYBGyBQsWLKwSsCa3t8r6LFiwYME8KCMbWRGyBQsWLKwSWIRswYIFC6sEFiFbsGDBwiqBRcgWLFiwsEpgEbIFCxYsrBJYhGzBggULqwQWIVuwYMHCKoFFyBYsWLCwSmARsgULFiysEliEbMGCBQurBBYhW7BgwcIqgUXIFixYsLBKYNZcyIKFvJBlGaIoAgAYhgFFGfJUsWDBAixCtlAmSJIEURQhCAIymYzyOUVRYBhG+UPTNGiaBkVRFllbsJADi5AtlARJkiAIghIVUxSlEK4sL7m1EqIOh8NIJBJob29XtmMYBizLWkRtwQIsQrZQBGRZhizL4HkekiQBgEKkhITJZ+q/yT4Mw0CWZSWq5jguax+LqC28U2ERsgXDICQqCMIyIjYC9XZ6++kRNYCs1AdJf1hEbWE9wSJkCwVBSDIQCKCxsVEhQbNEmBtB621TiKhlWc7aRk3QuXlqCxbWEixCtqALopgQBAGyLOPq1as4fPhw0URnhJDz7WuUqDmOQzgcRmtrq+6CogULqxEWIVtYhlwiJnldms4vWxcEAVNTU5iZmUFVVRWqq6uVP06nsyRC1oMWUUuShGAwiI0bN4LneXAcl7UNiaQtoraw2mARsgUFsiwrigk1ERNQFAVJkpYRM8/zmJiYQCAQQHt7O/bs2QOO45BIJJBIJDA/P49kMqkQPMuyClG7XK6CRF8stI5LXgiCIIDn+WXbW0Rt4UbCImQLChELggAAy4iYgKZpZTEPADKZDMbHx7GwsIDNmzfjyJEjoGkaPM/DZrOhuroakGOg5DRkqhrhCAefzwe3241EIoGFhQUkk0nIsgyn05kVUVeKqHOVH+prAGgTNdFSq1UfVtGLhUrAIuR3MNTFHEBhxQRJOaRSKYyNjSEcDqOrqwsDAwMKeWbJ3sRRMOJZyBQFgAZL7wLLsmhubkZzc7OyHTkmiajVRJ2b+nC5XGAYpuzXohBR56ZwZFkGTdPgOA5OpxN2u92S6FkoGRYhvwOhVcxhhEBEUcSlS5eQTqfR3d2Nbdu26S60UUiCEd+ETG0AKAaQM6iiXocs71q2PUVRcLlccLlcy4g6nU4rRB0MBpFMJiFJ0qoh6tHRUXR1dcHlcmXtQ9M0WJa1qhMtmIJFyO8QEDUCz/MKmRgliFgsBo/Hg3g8jvb2dqXSLj8ykCkskTEAUA5QlAAKXN691KAoCk6nE06nExs2bMj6LvmI2mazIZVKIRqNorq6uqJEDVyX3ZGxAUsvPXUJOdnHKnqxkA8WIa9zqIs5FhcXMTs7qxvZ5iIcDsPj8UCWZfT29gIA6uvrDe0ryy4ANkBOAZQTkCOQKTdEqfRbLh9RZzIZLC4uIhaLwe/3I5FIQJIkOByOZRE1y5Y+llxNtF5ETba1il4s5INFyOsUuVV1JDrLJRCt/YLBILxeL1iWRX9/P+rq6gAAPp/PcGGHDAck5l2gxVcBOQqgDhz2QJbny/k1l527qqoK9fX1CIfD2LZtm/KdMpmMElH7/X4kk0mIoqgQtcvlQk1NjWmiLnQ9c8dXbNGLOvVhKT/WLyxCXmfQ0hCTPwzDZKkkcvebn5+H1+uFy+XCtm3bUFNTk7WNWR2xTG+ASP0eAAEAC/ApAJUjZD0Qoq6qqkJTU9P18clyljxvenoaiUQCoijCbrdnRdTV1dWaRG2GkPONzwhRj46OoqenR0l3WBK99QeLkNcJ9Io51MiVrZH9ZmZmMDExgdraWuzevTtrgarQ/gVBUQBs1/5Z/sKQUkBRFBwOBxwOBxobG5XPc4l6ZmZGl6hNXw+T41MTbCKRUMiYGDVZRS/rCxYhr3EUKuZQQ02okiTB7/djcnISTU1N2LdvH6qqqvKeq1RCXW2ErId8RM3zPOLxOJLJJAKBAGKxGM6ePbssR11dXQ2bzVbWceXOeLR+DlhFL2sZFiGvURgt5lCDpmmIoojx8XH4/X60tLRgaGgIdrvd0DmLipBVWCuErAeKomC329HY2KgQdTKZxM6dOyHLshJRz87OIpFIQBAEpUCmHERN1gLyjU/9N0EhotaT6FlYeViEvMZgtpiDgOd5TE5OIhQKoampCTfddJNplcE7JUI2AxK12mw22O12NDQ0ZP2cpD6SySTm5uaQSCTA83xW+Tj5U+jFWKzqopiil0wmA4ZhUFNTY2mpVxAWIa8REF2r3+9XdMBGHgyO4zA+Po75+Xls2rQJbrcbPT09RY3BSIQcCoUwPj6eRTg1NTWw2WzrmpD1YLfbNYma5/ksr4/x8XFdoibXrtzIR9SLi4tgWXZZNG8VvVQWFiGvYuR25hAEAdPT09i8eXPBfdPpNMbGxhAKhdDV1YX+/n4AQCAQKHo8+Qg1GAzC4/GAZVl0dHRAkiQkk0nMz89jbGwMgiCAZVmkUin4/X7U1NToKhfWEop9wdhsNtTX16O+vj7rc57nkUwmEY/HsbCwoBA1wzBIp9OYmprKiqgrRdSSJCmkS2AVvVQea/tpWKfQ68zBsmzBCDWZTMLr9SIWi6Gnpwdbt25VHghC8MUil5BJJOX1euFwOBSpHHmB5EaFqVQK58+fhyzLCAQCSp5VrVwoRgt8o1FOwrHZbKirq1O03wSCIGB4eBgMw2BxcRGTk5PgOA4Mw2imPkodkyiKyyocraKXymPt3PXvAGgVc6hv3HwRaiwWg9frRTqdRm9vL3bs2LHshi/1ASApC1mWsbCwAK/XC6fTiR07diw5uxWA3W4HwzDo6OhQPsunBVYrFwhRV6IMuhSUQ4dsBCRa3bRpU9bngiAo125xcRFTU1NK/tflcmURtcPhMDxWLZvVfDCipVZ/RnTxVtFLNixCXgXIV8xRCJFIBB6PB5Ikobe3Fw0NDRW9ocPhMCYnJ1FTU4Ndu3bpapa1oPVCyScxU1fX+Xw+pQw611iourq6Yp7KRnAjCYRlWd2IOplMIpFIIBQKwefzIZPJgKbpZddOi6i1IuRiYLToxev1orOzEzab7R0t0bMI+QbCSDGH3n7q8ua+vr5lD2S5xzk7O4uJiQk4nU7s2bMHTqfT9HHMLOrlq67TMxYinso
syyqzjBtJ1DcSLMuitrYWtbW1WZ+Loqhcu1AoBL/fj3Q6DZqms8rHeZ6vKAHmEnU8HreKXmAR8g2BmWKO3P14nsfp06dRVVWFrVu3wu12V3ScMzMzGB8fR0NDAzo7O8GyrD4ZSxFQ/KtghHlQaIXEHgKo/MUmZpHPWIh4KodCISQSCbz++usAsMz83ul0rjmiLldqhGEYXaImEXUkEkEkEsHFixfBsuyy1EdVVVXZSZBE5O/0oheLkFcQxRRzkP0CgQDGx8chCILpVIFZSJKklFM3NTXhwIEDcDgc8Pv9yx6G64PkQHH/DoAD4AIteUGJKYjM+66VT1d2ak9R1z2VSYS3Y8cOSJKUZX4/NzeHVCoFAMuIhvT9W42odLTPMAzcbrfygo/H49i6dStYls0i6unp6ayIWh1Vl3r9KlH08u1vfxsnT54se9VkpWAR8gqA5Mrm5uYQj8fR1dVl6MaVJAnT09OYnJxEQ0MD9u3bhzfeeKOodIHRcZJy6ubmZhw8eDCrWCFvykEOg5ITkJlWQBIh0xtBSTMAkwZQmfEagTpnqgaR5SUSCcRiMQQCAaRSKYVoKh0RmsVKLR4SkIg1l6jVPycvumg0ipmZGaTTaQAr+6IrRNT/9E//hC996UsVOXclYBFyBZHbmQNY0gcXujlFUYTP54PP50NLS0sWMRKlQykLLrkPtyiK8Pv9mJqaQktLCw4dOqQZUeQvDGEBSIB87eeyAAoUVustRtM0ampqljna5U7d1RGh1mLYSmGl8+GFzkeq+HKvn9aLTouoSURdqe+kDh5u9MvUDFbn07KGkVvMAVxfwGBZNouccyEIAiYnJzE9PY1NmzZpEiPDMCWtgKsJXRRFTE1Nwe/3o7W1VZeICfJGyFQDZHY7KOEiaBmQZBECewSg1sZUkSBfRKheDCOqhVQqhcuXL5ddB5yLlSZk0jPQLPRedOrUUTwex+zsrJI6cjqdyGQymJubq0iO3yLkdyD0ijnUNwMhwVxwHIeJiQnMzc2ho6MDR44c0SXcUg1+SGPOQCCgEL9RX4u856YoyOzNkOlOiHwUIu0GmNaix7naoLcYdvr0aWzatAnxeDyrYKMYr4p8KGQstNqRL3UUj8cRj8eX5fhLXYwVBGHV6dYLwSLkElGomEONXEJOp9MYHx9HMBhEZ2cnjhw5UvCG0yN1IxAEAel0GmfOnMHmzZtx+PBhUzdsQdkaRQHMZsiyAFkUsXbpwzgoitIkanUJtNqrolj3t2Ij1mKxUp4jNE3DbrfD5XJleayQiJqkP+bn55FMJgFAs8Gt1rWJRCIVlYNWAhYhF4liijkImSaTSYyNjSEajaK7uxtbtmwxHP0UEyHzPI/x8XHMzc2Bpmns27fPUGVdMecmDVFFUVSmrqtdwVAJ6JVAEz9lLZtOcq20OpSsZ021VgpOHVGrO5FLkpSlQ19YWEAymYQsy1kRNcdxiMViy7xCVjssQjaJYos5ACidkM+fP4+enh5s377dNEmZiZDVTm8kAj937pyp86lRqHTb4/GA53l0d3eDYRglQiQLOxRFIZVKYXJyUimHrpRBzmqFzWZDQ0NDls+H2vg+t0MJKR+naRo8z5etgq4QVlrRYdS7RC23UxM10aGTe+7pp5/G008/jVAohI985CPYvn077r77bgwODuoe+7nnnsNnP/tZiKKIP/3TP8XJkyezfv7Nb34TP/jBD8CyLJqbm/F3f/d36OrqArD0XO7atQsA0NnZiX/91381exkAWIRsGMUWcwBLUyev16s4nh06dKjoG95IlJrJZDA2NoZgMKg4vZGxlpKD1tpXTcT9/f1oaGhQtNZaCobh4WHY7fashTGyYp9r1/lOgZbxPXDd54M4vyWTSZw9e1azfLycPh+VbEulhXLketU69A0bNuC+++7D0NAQfvGLX+Bzn/sc3n777byqGFEU8Rd/8Rd48cUX0dHRgaGhIZw4cQLbt29Xttm3bx/OnDkDl8uF733ve7jvvvvw1FNPAVjKd7/55pslfQfAIuSCUBdznD59GjfddJNhIiblzTRNo7e3F/X19Th16lRJ0Ue+CFltuamXCimFkNURshYRGxk7wzBobc1e7CMGOVr5VkLU5O+1tkhTCtQ+H8SMp7+/X/H5IBG1Vvl4ofxqPpQqqzSLSkX9kUgEDQ0N6OnpKegBfvr0afT396O3txcA8LGPfQzPPvtsFiG/5z3vUf59+PBh/OhHPyr7mC1C1oFWZw4jK91qJzSHw4EtW7aUtbxZi1BTqRS8Xi+i0egyy00j+xsFRVHgOA5vvvmmKSIuBD2DHBIdJhIJ+P1+JJNJiKKoRIeEpIshnbUGtXZc7fORWz6eTqeVa6bOr2oVa+hds5XOV5OZY7kRiUQM55D9fn+Wz3hHRwdee+013e0ff/xxfOADH1D+P51O4+DBg2BZFidPnsQdd9xR1JgtQs5BbjGHUdc1YsAzNjYGt9uNnTt3FrVwVgjqCJl4H8fjccM56WIJORaL4cqVK4hGo9i3b19eIi5X/lFvGk8WddRTeWBp2khaDqlz/OsBRkhS7fOhtxAWj8cLlo+vVJ6aoFLnC4fDhpo5ANqqEr1750c/+hHOnDmDV155RflscnISmzZtgtfrxa233opdu3ahr6/P9JgtQkb+Yg41yJRd/bna96GhoQF79+6tWGkzsESoqVQK586dQyqV0vU+zre/GUJWpyY6OjrAMExZouJioWcupC48CIVCiMfjGB4ezlqtJxH1WlxILEWHrLcQpr5m6qo6MjucmJhYkfJxQRAq4s0SjUYNR8gdHR2YmppS/t/n8y3zngaAl156CV/5ylfwyiuvZOWkyba9vb04evQozp49axGyWRgp5lCDRF42my2r3Li5uVkx4CkEkvooZkoYi8UwPT0Nnuexfft2NDU1mX5IjBKyVo44nU5jenra9LhXArmlzYIgYMeOHYrMMB6PIxgMLivcUC8mml1IXMn+gJXQIauvWUtLi/J5OBzG1NQUHA6HpqGQ+pqZMb3XQ6Ui5Gg0aliHPDQ0hJGREYyNjaG9vR1PPvkkfvKTn2Rtc/bsWdx777147rnnsq5XKBSCy+WCw+HAwsICfvOb3+C+++4raszvSEIm0jVRFAsWc6jBsiwymQx8Ph/8fj/a2toKlhvngqQczDxc0WgUHo8HgiCgqakJNpstKzo0g0KEnG+xzoyf8WqBXim0usmoWg9st9uX6YH1yGIlUyIrmdeVZRkOh2PZ4qv65ZarksklajOzkEou6hmNkFmWxXe+8x0cP34coijinnvuwY4dO/DQQw/h4MGDOHHiBL74xS8iHo/jD/7gDwBcl7ddunQJ9957r/JsnTx5Mmsx0AzeUYRcSmcOjuOQTCbxxhtvoLOzE4cPHy5qIYIQshESJ91AZFlWuoHMzs4iFouZPi+BHiEbUU2UWrZNsBpyu1pNRtXtpOLxOPx+v9KlRK1eqKmpgdPpXNGX00oSsp7KQu/lRrqT5JaP58oZ9crHK7moZya9dvvtt+P222/P+uzLX/6y8u+XXnpJc7+bb74Z58+fL26QOX
hHEHIpxRyZTAbj4+NYWFiAzWbDwMBA1iKTWRgp7AiFQvB6vaAoalk3EJqmiy6dJvurSdWMfK1cEfJqIGQt5GsnpadeIPcHIZ5K5VpXczSu151E3e9PLWdkWTaLqElX7XLDLCGvBqxrQiYaYo7jcO7cOezbt89UVd3Y2BjC4TC6u7sxMDCAkZGRkiPEfIQcDAbh8XjAsiwGBgaW3eBk/1LNhURRLEpHXI4IeTUScSHoqRc4jsNbb70Fp9OpadWp1lCXYiwErGyEXK4Ugp6cMTddFIlEcO7cOaX7uJqsS4mcE4lERZROlcS6JOTczhwMwxjyIQaWOiWMjY0hkUigp6cH27ZtU/YrZJ9pBLmELMsyFhcXFd1yobZMpZgLAUskMjMzg7m5OdM6YqMRcqHOD2stD60HYqm6ceNGbNy4UfmcWHVqOcDlFroYJZyVJuRKnis3XRSLxbB///6siDoQCCAej0MURYWo1d3HC103co+tNX36uiJkrWIOoxFZNBqF1+sFx3Ho7e3VVDAQlUUpIISqLiBxOp3Yvn37slJjLZSiI/Z4PEgkEqirq8POnTtNH8NIUUwwGMTo6Cg4jstaICN/rydC1ksj6Fl1qv0q1IRD/CrUhJMboa50yqISOV09EAWJ3W6H3W5f5vNB8vq5BUKFrpuZ53+1YF0QcrHFHMD1fC0AZeFMD6SbcSmgaRqLi4sYHR1FTU2N6f54ZiPk3NQEz/OIRCLFDD0vCBE7HA5s374dNpsNgiBkVdqRh+rixYuora3NWiBbaw8OYJ4k9YyF1BWJU1NTWWXQ5EWWyWRW7BoRslsNyJfXz2Qyyj2lvm5XrlzB66+/DkEQcO7cOWzZsqXg9ynFWOiJJ57AI488AgB44IEHcPfddxf9fdc0IZOb2YiGWK3/VacJ7Ha7br42F0T2VuxYZ2dn4ff74XK5sGfPnqIKSErREQPAwsJCWc1jwuEwRkZGYLPZlChfkiQlQs6ttHvjjTfQ09OjVI6RzhG5BRzlyLuuBagJp6mpSflc3UU7Ho8jHA4jHA4vM76vqakpixZYjbVg9akuH8+9bps2bYIkSfjtb3+Lr3/967h8+TI+/elP45577tE8VinGQsFgEA8//DDOnDkDiqJw4MABnDhxoujFxDVNyOrafiMaYp7nEQ6HMTY2hurqasNpAoJi8reyLGNmZgbj4+NoaGjA5s2bYbPZiq7mKzSGQot15ZKuRSIRjI6OgqKognnv3PNXVVVptqFX510nJiZWvcFQJdMIavey5uZmcByH1tZWuN1uRWKmXkhkGEazIrEYVDqHrEa501cURaGjowO33HILXnzxRfzwhz8suE8pxkLPP/88jh07pgQdx44dw3PPPYePf/zjRY1/TRMyYIxgJEkCz/MYHh5GY2Nj0dGpmZSFuqS6sbFRqeSbnp4uOsoG9FUWRlUTpRKyKIp44403IMsy+vv7TXdk0Msh6+Vdcw2GcnXBJJq+EWmPGyFF0+tZp14QW1hYWNahxIxyYSXd3ioVja+UsZDWvn6/v8hRrwNCzgd1W3tZlrF169aiK9wAY4SsPmdzc3NWx2igdJUESb0QmJWvlbIoODo6inQ6jV27dhXdicHsop6ewZBWw0yS9rDZbMhkMkrapFJYaULOd658jnmEqHON79UzD7Vj3kqaC1WySs9osFCKsZCZfY1gXRKyIAjw+Xzw+XxobW3F0NAQPB5PyQ9PPjJVe1u0tLRgaGhIkwzKQchAcX7EgHlCjsfj8Hg8yGQy6O/vRyqVMkTG+XL5pU5Tc6fzBCTtEQwGwXEcLl68uCxKLGfaYyUJuVgvCz3lQq6fciKRALDkmJdIJBAOh0HTdMVnHpVqRLpSxkIdHR14+eWXs/Y9evRo0eNe84Ssvll4nsfk5CQCgQA2bdqUVd5MVv1LgVaELIoipqam4Pf70draWtDbolRCjsViSCaTuHz5clF+xEYJOZlMYnR0FKlUCv39/VkLJ6WgkrI3kvZgGAaJRAI7duwAULm0x1otndbzUybubxcvXkQqlYLH41FmHmqvinK23jLTvskMzETIpRgLHT9+HPfffz9CoRAA4IUXXsCjjz5a9LjXPCEDS+XNExMTmJ+f1+2mXA7JmvoYgiAoRNze3o6bbrrJ0I1VLCGrI2K73Y6hoSHTxwAKEzJ5EOPxOPr6+rBhw4ayR0grrUM2m/Yg5GPErnO1R8hmoE75dHd3K4EFMRUi1qblbL1VSS9kdbFOPpRiLNTY2IgHH3xQeR4feuihkqwV1jwhBwIBjIyMLOsdlwuisigFhMw8Hg8CgQDa29tx5MgRUzdUqTrihoYGnDp1qpjhA9An5HQ6DY/Hg2g0ir6+vrwey0am6nrbrBa9cb60h9ooR632yE17rKYccjmRS5L5TIXUrbfGxsaWddAulCKqlLFQNBrN29A0F8UaCwHAPffcoyupM4s1T8jNzc2GojibzYZ0Ol30eXiex/j4OBKJBFiW1YzCjcAoIRebIy6EXELOZDLwer0Ih8Po7e0t2HVEy6Rfa5tK5pArCT3y0XKB43kesizD6/VmtZOqBHGutNubke9QjtZble6nt9aw5gnZZrMZinyLTVlwHIfx8XHMz8+js7MT1dXVSoVOMSg0DiNEXIrJPSFkjuPg9XoRDAYL9uHT2r9YcljthKwHrcWxcDgMv98Pt9uttEZKJpMVybmuZIQMlDaTMdN6iyhhZFlWrlU5HPPMyN5WE9Y8IRuFWUImtoqLi4vo7OzEkSNHQNM0pqamSpqq6kXIRFYmCELBiJhokYshRZ7nkUqlMDw8jJ6eHs3O1PlghFA5jsPi4qKmHeVaJWQ92Gw2NDc366Y9gsEgpqamssyFinEzW62WpUah13prYmICwJK6g7SR0qrcNGt6b0XIqxxGI+l0Oo2xsTGEQiHFdlNNfCRCLHaalXtDmSFi9RjMrk6TlMvc3BxomlZeMGZBSs8LnaO+vj6riowQEcdxJS+urhboXYd8XUri8Tji8ThmZmYQj8chSRKqqqqyiFqrI/RKGuWsJPFLkoSamho0NzdnqReIhJHI8sy23jLTvmk1Yc0TstGbp1CETPyPI5FI3ik8OU6pea9iiJjAzMKgIAiYmJhAIBDA5s2bceTIEbz66qslpRxyFwUFQcDk5CRmZmaUbiqCICjXj5gMxeNxpFIpjIyMKNIrQkRr1WSoHOZCxPw+tyO0mngkSVrzUbIW9Bb18jnm5Wu9BUD53Ig/zWrDmidko9Aj5GQyCa/Xi1gsht7e3iz/Yy3caB0xYExLTEhyenpaIeJyLAqpzy1JEqampjA1NYWOjg5loTM3cmRZVvG/TSaTaGlpQV1dXRYR5VbbqYnarJRqpVAOgtQzv8/tX5dOpzE8PFySp7IRrHQ6yeyiXqHWWyMjI/jud7+LqakpHDp0CD09PbjrrrvyeksUcnr71a9+hc997nM4d+4cnnzySdx1113KzxiGwa5duwBcl8KVgjVPyEYfiNypdiKRUITvvb29eWVeahS7OKiOiEvREQOFKwbV+mizsrxCIBGyz+fDxMQEWltbTfUXV
Ks09IhIS0qVr9T3RqGSEWtu2iMYDOLQoUPL0h6kFJrMNtSWpsVcn5V2eivHbFPtmHfTTTfhxz/+MW655RYMDw9jfHwcHMfp7mvE6a2zsxN///d/j69//evL9nc6nXjzzTdLGr8aa56QzUJdCqxnRJ8PZglZKzVx6tSpkh5mrQiZRKs+nw9tbW2GC1XMgEyvz549i40bN5ruuA0UXtTTmqqqPYOJPjiZTALAsmh6JS07b0QKoVDag/SvSyaToChKs5VUvjGvpI8FOV+571OiSGEYBn19fXm3NeL01t3dDWBluo+sC0I2snIfjUaRTCZx6dIl9PX1FV1NY7SNU74cMYlwi70R1RGy2syotbW1YkQ8Pz8Pj8cDQRCwbdu2ok2ailFZ6HkGS5KkadlJ0zQEQcDMzIxCRpV4mFaLWsRo2iNX7aGV9ljpCLkSL4B4PG7YDtas01su0uk0Dh48CJZlcfLkSdy/MnAPAAAgAElEQVRxxx2mx6vGuiDkfIhEIvB4PJAkCQ6HAwcOHCjphivUxsnIYl2phExUFiRt0NLSYjpaNRrdke4m1dXV2Lt3LzweT8k53XIRGU3TmmqGcDgMr9cLnucxNTWlGOe4XK6siLocxu6reZGtkNoj1wGuqqoKdrtdWTgrNu1hBpUg5EgkYnhBr1S3tsnJSWzatAlerxe33nordu3aVTAqz4d1QchaURdpzURRFPr6+lBXV4czZ86A5/mS2tPopSzM6oiLXRiUZRmJRALT09Noa2vTdZXLByPVdqQTiN1ux86dO5XuvaX6Ka8EgdlsNjgcDnR2diqfEeMcYuzu9/uRyWSytMFmneBWKmVR7vPkS3vMz88jGo1ibGwsS+2R28mlnOMp9zU0Yyxk1OlND2Tb3t5eHD16FGfPnrUIWY1gMAiPxwOWZZe1ZiJkWiohqw3mi5GvFdt5JBAIYGxsDCzLoqurS8ltmUW+artoNKrI0rQ6gZRa2HGjCkPUhQZq0xl1tDg9Pa1og0k/O/JHq3pspQh5JdIIJO1RW1uLdDqt+ECQtBAxFjKS9rjRCIfDhqv0jDi96SEUCsHlcsHhcGBhYQG/+c1vcN9995Uy9PVDyIuLi/B4PLDb7bothcrh+EbItBQdsZlxyLKMubk5eL1e1NfXY//+/Zifny+J1LSi3Hg8jtHRUfA8j4GBAd0buhwRcqUJuRzaYBJNq6vHSJskQtJqrXUlsdI+FrmFUGbTHrmWpiuthjFTNm3E6W14eBgf+chHEAqF8NOf/hR/9Vd/hYsXL+LSpUu49957lWfi5MmTWYuBxWBdELLX60U0Gi3YI68cnsgcx2F6ehqhUKhoHbGRCFm9kFZbW4t9+/ahqqpK2b9cbaBSqRRGR0eRTCYN+R6v1QjZDNROcOrqMbW72ezsLILBIERRRDgcziKhchsMraSaw2hON1/aQy1bJGkPtaUpSXtU6j4w62NRyOltaGgIPp9v2X4333wzzp8/X/xANbAuCLm3t9dQ1FaKBae6hVF1dTUOHDhQ1HGA/IRMOmJ7PB5UV1djz549cLlcWduUGqXSNI10Og2v14tIJIL+/n7DvsdrIUKuFHLdzaanpyEIAlpaWhRJHpGc5Ra4lNJ0dCUj5FIanOr5VUiSlKX2UPspZzIZ+P3+sqY9IpFI2RoqrDTWBSEbJYliUha5qQmbzQaPx1PsUAHoEzJRNDidTuzatWsZERfa3wiInvf8+fMYGBgoWJmYi0KEKkkSJicn4fP5NPOwwOqRi5UDpIt2bvcNdYGLuukoKfFVR9OFCHClUxblVj3oNWaNx+O4cuWKsj4Sj8d1bTrNfH9SdbsWsS4I2ShsNlveqh019HLE6XS6bHloglAohNHRUdjtduzYsSNv2gUoLkoVBAHj4+OYnZ2FzWbDli1birIn1Du3LMvw+/1K9d7+/fsV8o/FYpiZmUE6nVYeOFmWy9rf7kYgXyrBbIGL1pSeHPtGdLdeCRA/k46ODuUzPZtOAMs6uejJFteq9SawTgjZjMEQ0aTqodBiXbkWBnmeRzgcxujoKBiG0V2I1NvfaIQsiuIyT4vLly8XHaXmRshk0dHj8aCpqQlDQ0OKsx7LssvysKR4gzSFJf3t1A+b2+0uu7SqEjDbVilfgYtWAQfpvEF+3ytRRSeK4op5h2gZC5lNe+S6vwEWIa8Z5CNTo6qJUs2FACh5s2AwiMHBQdOuVEYiZOI3MTU1pTR8JQ+zlmObmXMTQiYplpqaGuzfvz8rJaEX1bEsC5Zl0d7enjVWPY2w2+3OioputH+FGuWKXPWm9MQwh+Slz549u+zlpeU5XQpKySEXcy6jLxi9a0SKWEgnl3vvvRfz8/Pw+/04fPgwjhw5gg9+8IO6xy3FWOiJJ57AI488AgB44IEHcPfddxv96rpYF4RcigWnWflaKTc+OVcqlUJ9fb3iEmUWhRYFp6enMT4+jo0bN2qWUpeyMEdRFBKJBM6cOQOWZbOKRozun3tuPY2wenqfW3GXG03fCFQ6F066lJB7bnBwELIsKw1Ho9Gopud0KQtklcgh66EcxkJq97eOjg7853/+Jz74wQ/iG9/4Bnw+H2KxmO6+pRgLBYNBPPzwwzhz5gwoisKBAwdw4sSJkk3x1wUhG4Va9laKjtgs1Brf/v5+yLKM2dnZoo+nRajkmF6vV0kd6BFVPpP5fEgkEvD5fBAEAbt37y7aANzoubVaAeVOXScnJ5ctlrEsWxJZjkfCCKZS6KlvQMO1qF8PK61DJoZB1dXVyyR5Wi5wTqdzmS4435hXkpArYSwELD1vO3bswN69e/NuV4qx0PPPP49jx44p9+axY8fw3HPP5bX5NIJ3FCGzLItUKoWzZ8+uCBEnEgmMjo4ik8mgv79f+eVFIpGS8tDqCFmWZSwsLGB0dBR1dXVZqQM9mI2QSUfqeDyOpqYm2Gw2Q2SsNaUvlcD0pq6ZTCbLWzkSiWB4eDiLjNxud8H86LeGX8O/jFwGS9OgAHzjPcewd2Or4e9XCRjpp6f2nCbINb9Xe06rI2m15/RqTVmYgSRJhoi+FGMhrX39fr/5weZgXRCykYeCRMSJRALbtm0rmYjzPYzJZBIej0cptmhsbMzattQ8NCHUYDCIkZERuFwuTb1yof0Lged5jHnfRip+FR2bO9G4bR8Cs4uK2F8PpN2QnnFLJab66sWy5uZmjI2NYevWrUo0vbi4iPHx8Sxv5dxCjrOzAfx/I5dBAZBkGbwo4r//6j/w73/wCc1zrqSXRTEkWYzndCqVUnL1la6yEwTB8D1rFGT9wui2uTD6+yzVlEgP64KQAf0HPTc1kUqlSs/zXMtF50ZbqVRKiST7+vp0iy2MWnjqIR6PIxqNYnJy0pBMLheFCJm0fVqY82Bn72/h7hBB4RzkzBtg6NvXTGGIltuZLMtZ0bS6kONsaknxQVNLJMTSNCKZDHhRhE0jklurXhZ6krxMJoMLFy4ondYr7TldiQiZ3FtGfi+lGAt1dHTg5Zdfztr36NGjpsaqhXVDyLmoZI6YECohZHXVW19fX8HuI8VGyLFYDCMjI5BlGVVV
VQVzZHrQI2S1MqO9vR037Y2DlmSAboMMAJIfDvocZHlrUecFbnylHtG+5hZypDgOz792CilBBA0RdpoGL0nY4HBgamJCMwe7VglZC+S6MAyDrq4u5d7W85wmOXu15MzsGCuRQ04mk4YXmUsxFjp+/Djuv/9+hEIhAMALL7yARx99tOhxE6wbQiYPuhEiLvVBIp7ImUwGXq8XoVDIUD8+9f5mCJnkojmOy+o6Uixoms4qIZdlGTMzMxgbG8tSZlCp/wAo9ZTSAYaKr5kI2SgkWcYXX/kPvDUXACggI0mQZGBjTTW+9u6jcDNsVg6WmAwRdUN9fX1Fnc5uZGGInrlQrgImmUxCluW8BS65KIfKIhfhcNjwYnMpxkKNjY148MEHlVZsDz30UNFNL7LGVPIRVgmIbWShiJhEh6XcCBRFwePxIJFIoLu7W7dDtR6MqhzUKRBi/FMu3SvpYkwWBOvr65crM5hBUNwoZNoFQAKQgkh1rjtzobFIGOfn50BTFGrsdoiSBBnAd469Hz11S/eROgdLTIbGxsYQDoexsLCgKBoKWXYWgxvp9qYHPQVMKpVCLBZDOBzOKt7Q8pyulDn9ShgLAcA999yDe+65p7iB6mDdEHI4HEZ3d3fB1ASpIivmRuB5HmNjY1hYWMCmTZuwa9euijwoJPIOh8OGUiBmQdM0EokEhoeH4XQ6sXfvXjidzmXbybYjkOQIKOE1ADRk2/shcdshSTN5j8/zPCYnJ5VKsxtR0JGP9HlJQorn4b4WvfGiCJoCpGu70BQF8p8WiMlQTU2NQkq5lp2kVFztG+x2u4sqFV9JKVqpvR6JJE8NdWNWdXVmJpPB9PQ0amtry/YCM2NOvxqxbgi5q6vLUBqgmNJnnucxMTGB2dlZdHV1YfPmzaitrS07yagJv5jI2whisZjS3mj//v35FwQpBrLj9yDbPwCAAigKVDKsS3bqMu3W1lZwHJdV0EGIWRRF8Dy/YiW6avy7dxRffe0URElGW00NvnnrMfTVN6DFVQ1/PAZc+27ddXXocOevoFSTl55lpx4Z5U7t87WTKlZlUQwqkRrRs+o8ffo0ampqsl5gpMBFvZBoJh1kEfIagxlCJmqDQCCg+EDQNK3Ip8oF9Xm6urpw+PBhQw+gmWgmmUwqmuhNmzYhmUwaV2dQ2TnFXEJWVwe2tbXh8OHDEEUxa3xkcSgQCCAajeL8+fMQBEFpX09KpMtZBgwAcY7DI6d+jdMz07AzDKKZDGwMDRtDwx+P4b6Xf4GffOgj+N7x2/HN06/CEw5hsLEJXxg6DLbA78BI6kWLjMyWiq9kymKl0kkURYGm6azKTGC557TH41EMqdQvML0Cl7XsYwGsI0I2Uz5dyBOZRHp+vx8dHR1ZPhDkGOUgZEEQ4PP54PP5NM+TD0YbpWYyGXg8HkQiEQwMDKCpqQnhcLigyZIe1KXP6hx0Q0NDVg6a5KgJyOIQuW4DAwNZhQta03xC1KWkPB459Wv8xj91TcKWRkoQUMsstfCy0zTGI2HwoojGKiceueU9po9fzLjMloqLophV0FFKC7LVjlzPaWC5A9zc3BxSqZRStUjuFYZhLEJea8jXNUQURUxNTcHn86G9vR1HjhzRJEiGYQzbeGpBkiQIgoBXX31ViSjNrtIX0hLzPI/x8XHMz8+jp6cnSwFiqDBElgEkANgA6joBEEKORCK4evUqHA6Hbg5aC+pFPb3CBfU0PzflYVYP+9qMHyxNg6Yo2BgGSUEAL4pgaRqiLKPGZi8YCeuh3NGk3kLZ5cuXYbfbs0rFbTZbVjRt1jNYC6ttsZVAzwEu13P6gQcewIULF9DU1ITx8XHs3r0bH/vYx3SvSyFjoUwmg0996lN4/fXX0dTUhKeeegrd3d0YHx/Htm3bsGXLFgDA4cOH8f3vf78s33XdEHIpBkNq/a0Rgiw2QlbLy2RZxt69e00XdRDoSefUedzOzk7N9EdBQpYToNM/AUQPQFGQbcch234XoChkMhmEQiGIomjKMpTAyO9Jb5qfTCYRi8U0q+4IOeW+GFw2G6IZDnaGAktRsNP00hhkgKEoPPzu3y06RbIScjSapsGyLDZs2JAV+XEch1gstsxXuZQijpVePCwVuQUuTz31FO6//34cOXIEjY2NuHLlii4ZGzEWevzxx9HQ0IDR0VE8+eST+Mu//Es89dRTAIC+vj68+eabJX+HXKwbQjYKdddoSZLg9/sxOTmp64ymdwwzhJzbqPTgwYN4++23S4pocklVkiRMT09jYmJimd1moX1zQWV+tkTGdBsAERT3c3BiM0bGKITDYTidzpJaWOU+jMF0CpwgoqW6GrQOwWl5WKir7mKxmKITpigKHM/j4f94CQuJBDKSpEjatm9oxmf2DyHOcdjS1IRNNeZeKLnfY6VKp3PPY7fb0dTUtMxXWV3EoVcqrlcSvRrldWYRi8XQ19eHm266Ce9///t1tzNiLPTss8/ir//6rwEAd911Fz7zmc9UfBbxjiNkm82GWCwGv9+P8fFxtLS04NChQ6ZW/I0SMumPNzo6CrfbvaxRaSnl02R/tcvbhg0bDH2XgoQsegG6CaAoSBKNRCyJydlfo7H1DvT29uLChQtFj1udspBkGf/r7Bm8MOYFBaC7rh5f/p3fRZ0jvzmS+lhaVXfRaBQ/PP0qXgr44aBpMAA4WUaHw4EHtu1Co90Bd9OGkkuAV1ulnlYRB+lSQqJp0ng0N/9aU1OzIgb4BJU6VzQaNaSyMGIspN6G5LYXFxcBAGNjY9i3bx9qa2vxyCOP4Hd+53fKMv51Q8hGHgxZlhXBOpmmFPNQGiHTUCiEkZERVFVVYffu3ctMVMphMBQKhfD222+jtrbWkMubet98hCzTblDcm0hmGISjNtTXyti6/Qgo+5KUrVyFIb+amsDzXg9qHQ5QALzhML539nWcPPyuoo8PLD083nQKEoAqmw02ADZJAkfTcLtcy2w7c3OxpRjMVAKlRJPqLiV6+VdiMMRxHARBgNfrNWzXWSy0uoWUA5FIxJBNghFzIL1t2traMDk5iaamJrz++uu44447cPHiRdONJrSwbgg5H9RRJBHzk4R8McgXIZOKQZqmsW3bNt0caymEHIlEMD8/j0QioUn2hZCPkGVxAZn4CChhDA6bjPYNNkj2OyDbdhbc1wjUhDx6zQeApCmcNhYjoWDRx1Zjo6MKNEUrUawoydhcW4/W1utWmlrRIzEaUuelyQq+3vepNIzYb5qFlsFQNBrFxMQE3G63Zqm4OpoulUwrFSEbVVkYMRYi23R0dEAQBEQiEcW5kShdDhw4gL6+Ply9ehUHDx4sefzrhpC1blh17pZ4BQPAxYsXSzqXFiHH43GMjIxAFEUMDAwUnDYVQ8jkHJIkoampCa2trUXZF+qR6uLiIkKBH2FjQxo17mNg6DRkKQpQToC63v7JSGSoRyDq/Tvcbsi4PvVPCwJ2NDVr7mcGgiSh21WNjdXVWEglQctAXZUDXzh0GAvJJL762imMhILoq2/AXx6+GS0bNmRFj0QLS6R48Xg8q5iDEHUliFILK1U
YIssy7HY7mpubNUvFc7XBpZSKV8LHghzXyKzXiLHQiRMn8MQTT+DIkSP4x3/8R9x6662gKArz8/NobGwEwzDwer0YGRkpW5frdUPIwPWHnehjPR4P3G53lixLEISSNcTq4ghScJFKpTAwMGDYYMQMIRNPi0QioZyDPBTFjl9NyNFoFFevXgXLstjRuxEOeg6gHQAcWKoe5nT3LQXv7e7F8MwMzgSmQVMUml0u/Pn+4hcLAYATRXzx1y/j7YV52FgWLEXhMwcO4b1dPahiWfwfP/0XTMdioCkK88kk/usL/4affOj3Yc/RmWtpYYm3cjgcxtTUFCKRCDiOU8qoyyU/y0W5F8Au/uYK/vb//iFiwQT23bYT937zk3C4HLpRq9710CoVV7eSylcqXgmnNzMpJCPGQn/yJ3+CT37yk4qn+ZNPPglgqc/eQw89BJZlwTAMvv/975fFWAhYZ4SsJmKXy1WR3C2BJEm4ePEiotEo+vv7db2P9WBkHBzHKW5yfX19aG5uNqcl1gE5RiqVwsjICDKZDAYHB5ceOMEFpF4BxCRA1QJIAOzQsn0LIRKJQBRFuN3urAdSHSHbaBpfuvndmIhEkBEFdNfVo6rEh/RfR6/iUnARFGTQFIWMKODFMS/uGNgCTziE+WRiqRsIRYEBsJhKYSwSxpbGprzHVbdOIsUc586dQ3d3t6KbJvIzsq065VEK+ZQzEvePzOCRu76FTHLpJfubfxlGOpHBff/wF6aIX69UXN1KKl+peLF+MkbHZgSFjIWqqqrwzDPPLNvvzjvvxJ133lnaIHWwrgj5rbfeAkVReRtvlnpjE5Ik3UC2b99e1DGJhacWBEHA+Pg4Zmdn0dPTgy1btiw7RykvFo7jkE6n8eabb2a/TOQMaP5FUBABaQyAHVLVpyGzxn2X4/E4rl69ClmWwbIsRkdHIUmSkoO02+1Z46YpCj3Xcn7RTAbn5+dQxbLY3rQBTBFRoS8WhSBJsF27Xgy1VCINAA6GgSTLimmQLMuQr31eDGRZhsPhQG1tbZb8jCyYESle7hSfEHU+/4rc85QrQn7zFxcgiddf5Hyax5nn3lLGXSpJ6rWSIrMLUioej8dB0zQymYxyTVwuV0nnT6fThguUVivWFSFXyn0NuF75Njc3h+7ubiVSKpbg1XpoAkmSlErBjo4OxTtDCwzDFCwBz4UoipiYmMDMzAxomsbhw4ezxk/xZ0CJo5DpLQCzFZDmAXkKMPAdM5kMRkdHEY/HMTg4qJRJk0ieFHWEw2FEo1G89tprqKqqUmRaYVnG/ad+hZQgQJJl7NjQjK/c8p6sVIIR7NjQDBtDQ74mCRSvHQsA2mvcuLl9M37tm4QoSrDRDA63t6OrtrxmNHodObT8K4gjnpqUcn/n5UxZOFwO0Ez2sWwOtuznUUNrdjE5OQmapuFyuZBIJIr2VFYjHA6XRelwI7GuCJllWcPTeKMaUjWJqQ2GpqamSqpsym1USsx5WltbDRWomElZaBWNvPbaa8u/vxyGDNt1AqZcoKRF5MvMiaKI8fFxBAIB9PX1KTMGdfSvLupobGwEx3HYvXt3lo/FV996A4uJOFwMA4aicDYwjZ9efhsf2bbDFEnc1tWDt2am8c8jV0HJMrY0NuIvb7oZAHDKP4XXpn3X7DZpfHTrNnx638EVqdTTm+Kr/SsmJiayKu4qsXj4ro8M4R+//jMEZ8IQOAF2px3/518tTb9XUocsCALcbnfBruJTU1PgOM7Qi2ut+1gA64yQjd60Rox5SLRK2hnpGQyVQsiCIGBubg4ej2eZOY/R75APsixjfn4eo6OjaGpqKlg0ItPdoPESZJkHwICSQ5DY/brH9vv9mJiYUHw/jBCn2q5S7WORvPgWap3OJY8JSYKUyeCy34/Xk+msnCxZKNL73VEUhf+6Zz9ucdWgb3ALGq+t/M8nk3jg1y9DkGS4bHbwooifekbwX/aWVnFYKlFq+VeIoqjMKEghx+nTp8vijOd0O/GNX/81nnv8l4jMRbH3tp3Yf9suAMa7NZcDes+fXldxtTwx98VVU1ODcDiM6enpNW29CawzQjYKYlKvdUOoo8mNGzfq+lqUujhIrCglSTJlzkNQKEIOhUK4evUqqqurjReNsNshyR8Gzf0bABESOwTZ/r6sTWRZBs/zePXVV9HY2Gi8ylFOgeX/CYzgxcbaakDemmVatHNDM16ZmoT72oKbnWXxu9u3Y6ijE6IoKlFkrgyNkJPb7c56mbkYFk2qazoRDYOmKLD0EoHZGAYZUcRcMlHQ91j3K1WoUi+3OWs4HMbQ0FDZnPGq61y48/O/t+zz1VypV6hU/Je//CWeeeYZBAIBTE1NYc+ePfjCF76QJd9To1hjIQB49NFH8fjjj4NhGPzN3/wNjh8/bv4C6OAdSchaOmJZlhEIBDA2NoampqaC0WqxBkOkcESSJNTV1WHnzp2mjwHovxDIohqAgh2plxEKRUG2H4Vo+x0AEkBlE200GsWVK1cgCAIOHTqU9yWSdVxZhD3zP0BLlyDLLNrqE7BlEuAcDyrpkc/sH8J8MolLiwsAgI9u3Y53tW9Wvmuu7ErdLohU3nEcp5RSp9NpJJNJOJ1OxDgOlxcXkeB52GgGLE0jk+YgcAKevv8Z/OF/ux3tA22638Xw9asQKIoqyRnP7XYbemmupJdFOSr11KXin/3sZ9HW1oZAIIA/+qM/wltvvaV7f5ZiLPT222/jySefxMWLFzE9PY3bbrsNV69eLduLbF0RcjGOb2Ra7/F4UFdXhwMHDhjymzVLyOpGpYODg7Db7bh06ZLh/XORGyGn02mMjo4ikUhgcHCwYPkokZ9pXjOKAXD9BsuVx128eDEvGcuyDFEUlZQOLU+Alq4CqAcogBcoVEnnQMlzkKmNkGUZv/X70FDlwPt7evHRbTvQVl0DXywGXhLRWVu3zCJTq10QMRtaWFhAOBzG6Ogo5uIx/M/xUaRECZIkISYIcMo0MkkObT/34fVzYVx84QIefeFLaOnaADNYqdLpfOdRO+OlExmMnZsAY2excVMTkqnkMpMhtRQvtyx6NUfIRkDKptvb29He3q67XSnGQs8++yw+9rGPweFwoKenB/39/Th9+jSOHDlSlu+wrgjZKIgnMjH+cblcptMGRlMW6XQaHo8HsVhMMYgHlnJi5TAXUrd9MtN/z0gnCp7n4fV6sbi4iIGBgYJaa0IcxPSIYRjIsgxJEgEZkCFfWyBc+pckiaBo4B8unsdP3r4AigJEWcbpwAw21dTgzblZ0KDQ4Xbjm7e+D/V50i6cKOJKcBEMRaGjvh7hcBg7d+7EN4dfRUKSYGNoOGkKlCCCnoyi9+kxOGMSKDuDVCyNX//jq7jzCx8seN1ysVLdoAthbnIBXzr+KFLxNGRJRu+eLjz0L59HW9tS5E9eViQPqy6LJlF0KpVasfFWolIvGo1mlcbroRRjIb/fj8OHD2ft6/f7y/QN1hkhG304eJ7HlStXUFNTk1eznA+FImQ1Ufb29i7TK7MsW3KBSiwWw+nTp3V9j/MhXw5aLb/r7OzEwMDAsm
...[remainder of base64-encoded PNG output omitted: 3D scatter plot of the three latent dimensions (mu_result), colored by wine type]\n",
586 | "text/plain": [
587 | ""
588 | ]
589 | },
590 | "metadata": {},
591 | "output_type": "display_data"
592 | }
593 | ],
594 | "source": [
595 | "ax = plt.axes(projection='3d')\n",
596 | "\n",
597 | "\n",
598 | "# Data for three-dimensional scattered points\n",
599 | "winetype = data_set.wine\n",
600 | "zdata = mu_result[:,0].numpy()\n",
601 | "xdata = mu_result[:,1].numpy()\n",
602 | "ydata = mu_result[:,2].numpy()\n",
603 | "ax.scatter3D(xdata, ydata, zdata, c=winetype);"
604 | ]
605 | },
606 | {
607 | "cell_type": "code",
608 | "execution_count": null,
609 | "metadata": {},
610 | "outputs": [],
611 | "source": []
612 | }
613 | ],
614 | "metadata": {
615 | "kernelspec": {
616 | "display_name": "Python 3",
617 | "language": "python",
618 | "name": "python3"
619 | },
620 | "language_info": {
621 | "codemirror_mode": {
622 | "name": "ipython",
623 | "version": 3
624 | },
625 | "file_extension": ".py",
626 | "mimetype": "text/x-python",
627 | "name": "python",
628 | "nbconvert_exporter": "python",
629 | "pygments_lexer": "ipython3",
630 | "version": "3.6.5"
631 | }
632 | },
633 | "nbformat": 4,
634 | "nbformat_minor": 2
635 | }
636 |
--------------------------------------------------------------------------------
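The final code cell above plots the three-dimensional embedding produced by the encoder (the `mu_result` means), colored by wine class. The following is a minimal, self-contained sketch of the same plot using synthetic stand-in data, since `mu_result` and `data_set` are produced by earlier cells that are not shown here; all names and values in the snippet are illustrative rather than taken from the notebook.

```python
# Sketch of the 3D latent-space scatter, with fake data standing in for the
# encoder means (mu_result) and the wine labels (data_set.wine).
import numpy as np
import matplotlib.pyplot as plt  # 3d projection is registered automatically in matplotlib >= 3.2

rng = np.random.default_rng(0)

# Fake "latent means": three clusters in a 3-dimensional latent space,
# one cluster per wine class (0, 1, 2), 50 samples each.
mu_result = np.concatenate([rng.normal(loc=c, scale=0.3, size=(50, 3)) for c in range(3)])
winetype = np.repeat([0, 1, 2], 50)

# Same axis assignment as the notebook cell: x = dim 1, y = dim 2, z = dim 0.
ax = plt.axes(projection='3d')
ax.scatter3D(mu_result[:, 1], mu_result[:, 2], mu_result[:, 0], c=winetype)
ax.set_xlabel('latent dim 1')
ax.set_ylabel('latent dim 2')
ax.set_zlabel('latent dim 0')
plt.show()
```

If the classes are separable, they should appear as distinct clusters in this 3D latent space, which is the visual check the notebook's final plot performs on the real encoder output.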