├── README.md ├── day1-NN ├── FFNcasper.png ├── README.md ├── confusionmatrix.py ├── confusionmatrix.pyc ├── exercises_1.ipynb ├── exercises_1_all_code.ipynb ├── layer.png ├── mnist.npz ├── network.png └── notation.png ├── day2-Conv ├── Convnet demo.ipynb ├── ZCA vs. LCN.ipynb ├── confusionmatrix.py ├── convnet_exercise.ipynb ├── convnet_exercise_solution.ipynb └── mnist.npz ├── day3-RNN ├── README.md ├── RNN.ipynb ├── data_generator.py ├── decoder_attention.py └── enc-dec.png ├── day4-VAE ├── Tutorial.pdf ├── bayes by backprop.ipynb ├── bayes by backprop_solution.ipynb ├── mnist.npz ├── nade.ipynb ├── nade_solution.ipynb ├── vae.ipynb └── vae_solution.ipynb └── day5-ladder ├── .ipynb_checkpoints ├── autoencoders_exercise-checkpoint.ipynb └── vae_sampling_from_manifold-checkpoint.ipynb ├── autoencoders_exercise.ipynb ├── mnist.npz ├── prob_to_denoising.pdf └── vae_sampling_from_manifold.ipynb /README.md: -------------------------------------------------------------------------------- 1 | ### Deep Learning DTU Summer School 2015 2 | 3 | Welcome to the DTU summer school 02901 Advanced Topics in Machine Learning - Deep Learning. 4 | 5 | Please see the course webpage for further details: [http://deeplearningdtu.github.io/Summerschool_2015/](http://deeplearningdtu.github.io/Summerschool_2015/) 6 | 7 | We will post links to exercises and other information on this page. 8 | 9 | [Official Course description](http://www2.compute.dtu.dk/courses/02901/) 10 | 11 | [Course Program](http://www2.compute.dtu.dk/courses/02901/courseprogram.pdf) 12 | 13 | 14 | -------------------------------------------------------------------------------- /day1-NN/FFNcasper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day1-NN/FFNcasper.png -------------------------------------------------------------------------------- /day1-NN/README.md: -------------------------------------------------------------------------------- 1 | # day1-NN 2 | day1 3 | -------------------------------------------------------------------------------- /day1-NN/confusionmatrix.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class ConfusionMatrix: 5 | """ 6 | Simple confusion matrix class 7 | row is the true class, column is the predicted class 8 | """ 9 | def __init__(self, num_classes, class_names=None): 10 | self.n_classes = num_classes 11 | if class_names is None: 12 | self.class_names = map(str, range(num_classes)) 13 | else: 14 | self.class_names = class_names 15 | 16 | # find max class_name and pad 17 | max_len = max(map(len, self.class_names)) 18 | self.max_len = max_len 19 | for idx, name in enumerate(self.class_names): 20 | if len(name) < max_len: 21 | self.class_names[idx] = name + " "*(max_len-len(name)) 22 | 23 | self.mat = np.zeros((num_classes,num_classes),dtype='int') 24 | 25 | def __str__(self): 26 | # calculate row and column sums 27 | row_sum = np.sum(self.mat, axis=1) # totals per true class (row) 28 | col_sum = np.sum(self.mat, axis=0) # totals per predicted class (column) 29 | 30 | s = [] 31 | 32 | mat_str = self.mat.__str__() 33 | mat_str = mat_str.replace('[','').replace(']','').split('\n') 34 | 35 | for idx, row in enumerate(mat_str): 36 | if idx == 0: 37 | pad = " " 38 | else: 39 | pad = "" 40 | class_name = self.class_names[idx] 41 | class_name = " " + class_name + " |" 42 | row_str = class_name + pad + row 43 | row_str += " |" + str(row_sum[idx]) 44 | s.append(row_str)
45 | 46 | col_sum = [(self.max_len+4)*" "+" ".join(map(str, col_sum))] 47 | hline = [(1+self.max_len)*" "+"-"*len(col_sum[0])] 48 | 49 | s = hline + s + hline + col_sum 50 | 51 | # add linebreaks 52 | s_out = [line+'\n' for line in s] 53 | return "".join(s_out) 54 | 55 | def batch_add(self, targets, preds): 56 | assert targets.shape == preds.shape 57 | assert len(targets) == len(preds) 58 | assert max(targets) < self.n_classes 59 | assert max(preds) < self.n_classes 60 | targets = targets.flatten() 61 | preds = preds.flatten() 62 | for i in range(len(targets)): 63 | self.mat[targets[i], preds[i]] += 1 64 | 65 | def get_errors(self): 66 | tp = np.asarray(np.diag(self.mat).flatten(),dtype='float') 67 | fn = np.asarray(np.sum(self.mat, axis=1).flatten(),dtype='float') - tp 68 | fp = np.asarray(np.sum(self.mat, axis=0).flatten(),dtype='float') - tp 69 | tn = np.asarray(np.sum(self.mat)*np.ones(self.n_classes).flatten(), 70 | dtype='float') - tp - fn - fp 71 | return tp, tn, fp, fn # ordered to match the unpacking in the metric methods below 72 | 73 | def accuracy(self): 74 | """ 75 | Calculates global accuracy 76 | :return: accuracy 77 | :example: >>> conf = ConfusionMatrix(3) 78 | >>> conf.batch_add(np.array([0,0,1]), np.array([0,0,2])) 79 | >>> print conf.accuracy() 80 | """ 81 | tp, _, _, _ = self.get_errors() 82 | n_samples = np.sum(self.mat) 83 | return np.sum(tp) / n_samples 84 | 85 | def sensitivity(self): 86 | tp, tn, fp, fn = self.get_errors() 87 | res = tp / (tp + fn) 88 | res = res[~np.isnan(res)] 89 | return res 90 | 91 | def specificity(self): 92 | tp, tn, fp, fn = self.get_errors() 93 | res = tn / (tn + fp) 94 | res = res[~np.isnan(res)] 95 | return res 96 | 97 | def positive_predictive_value(self): 98 | tp, tn, fp, fn = self.get_errors() 99 | res = tp / (tp + fp) 100 | res = res[~np.isnan(res)] 101 | return res 102 | 103 | def negative_predictive_value(self): 104 | tp, tn, fp, fn = self.get_errors() 105 | res = tn / (tn + fn) 106 | res = res[~np.isnan(res)] 107 | return res 108 | 109 | def false_positive_rate(self): 110 | tp, tn, fp, fn = self.get_errors() 111 | res = fp / (fp + tn) 112 | res = res[~np.isnan(res)] 113 | return res 114 | 115 | def false_discovery_rate(self): 116 | tp, tn, fp, fn = self.get_errors() 117 | res = fp / (tp + fp) 118 | res = res[~np.isnan(res)] 119 | return res 120 | 121 | def F1(self): 122 | tp, tn, fp, fn = self.get_errors() 123 | res = (2*tp) / (2*tp + fp + fn) 124 | res = res[~np.isnan(res)] 125 | return res 126 | 127 | def matthews_correlation(self): 128 | tp, tn, fp, fn = self.get_errors() 129 | numerator = tp*tn - fp*fn 130 | denominator = np.sqrt((tp + fp)*(tp + fn)*(tn + fp)*(tn + fn)) 131 | res = numerator / denominator 132 | res = res[~np.isnan(res)] 133 | return res 134 | -------------------------------------------------------------------------------- /day1-NN/confusionmatrix.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day1-NN/confusionmatrix.pyc -------------------------------------------------------------------------------- /day1-NN/layer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day1-NN/layer.png -------------------------------------------------------------------------------- /day1-NN/mnist.npz: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day1-NN/mnist.npz -------------------------------------------------------------------------------- /day1-NN/network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day1-NN/network.png -------------------------------------------------------------------------------- /day1-NN/notation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day1-NN/notation.png -------------------------------------------------------------------------------- /day2-Conv/confusionmatrix.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class ConfusionMatrix: 5 | """ 6 | Simple confusion matrix class 7 | row is the true class, column is the predicted class 8 | """ 9 | def __init__(self, num_classes, class_names=None): 10 | self.n_classes = num_classes 11 | if class_names is None: 12 | self.class_names = map(str, range(num_classes)) 13 | else: 14 | self.class_names = class_names 15 | 16 | # find max class_name and pad 17 | max_len = max(map(len, self.class_names)) 18 | self.max_len = max_len 19 | for idx, name in enumerate(self.class_names): 20 | if len(name) < max_len: 21 | self.class_names[idx] = name + " "*(max_len-len(name)) 22 | 23 | self.mat = np.zeros((num_classes,num_classes),dtype='int') 24 | 25 | def __str__(self): 26 | # calculate row and column sums 27 | row_sum = np.sum(self.mat, axis=1) # totals per true class (row) 28 | col_sum = np.sum(self.mat, axis=0) # totals per predicted class (column) 29 | 30 | s = [] 31 | 32 | mat_str = self.mat.__str__() 33 | mat_str = mat_str.replace('[','').replace(']','').split('\n') 34 | 35 | for idx, row in enumerate(mat_str): 36 | if idx == 0: 37 | pad = " " 38 | else: 39 | pad = "" 40 | class_name = self.class_names[idx] 41 | class_name = " " + class_name + " |" 42 | row_str = class_name + pad + row 43 | row_str += " |" + str(row_sum[idx]) 44 | s.append(row_str) 45 | 46 | col_sum = [(self.max_len+4)*" "+" ".join(map(str, col_sum))] 47 | hline = [(1+self.max_len)*" "+"-"*len(col_sum[0])] 48 | 49 | s = hline + s + hline + col_sum 50 | 51 | # add linebreaks 52 | s_out = [line+'\n' for line in s] 53 | return "".join(s_out) 54 | 55 | def batch_add(self, targets, preds): 56 | assert targets.shape == preds.shape 57 | assert len(targets) == len(preds) 58 | assert max(targets) < self.n_classes 59 | assert max(preds) < self.n_classes 60 | targets = targets.flatten() 61 | preds = preds.flatten() 62 | for i in range(len(targets)): 63 | self.mat[targets[i], preds[i]] += 1 64 | 65 | def get_errors(self): 66 | tp = np.asarray(np.diag(self.mat).flatten(),dtype='float') 67 | fn = np.asarray(np.sum(self.mat, axis=1).flatten(),dtype='float') - tp 68 | fp = np.asarray(np.sum(self.mat, axis=0).flatten(),dtype='float') - tp 69 | tn = np.asarray(np.sum(self.mat)*np.ones(self.n_classes).flatten(), 70 | dtype='float') - tp - fn - fp 71 | return tp, tn, fp, fn # ordered to match the unpacking in the metric methods below 72 | 73 | def accuracy(self): 74 | """ 75 | Calculates global accuracy 76 | :return: accuracy 77 | :example: >>> conf = ConfusionMatrix(3) 78 | >>> conf.batch_add(np.array([0,0,1]), np.array([0,0,2])) 79 | >>> print conf.accuracy() 80 | """ 81 | tp, _, _, _ = self.get_errors() 82 | n_samples = np.sum(self.mat) 83 | return np.sum(tp) / n_samples 84 | 85 | def 
sensitivity(self): 86 | tp, tn, fp, fn = self.get_errors() 87 | res = tp / (tp + fn) 88 | res = res[~np.isnan(res)] 89 | return res 90 | 91 | def specificity(self): 92 | tp, tn, fp, fn = self.get_errors() 93 | res = tn / (tn + fp) 94 | res = res[~np.isnan(res)] 95 | return res 96 | 97 | def positive_predictive_value(self): 98 | tp, tn, fp, fn = self.get_errors() 99 | res = tp / (tp + fp) 100 | res = res[~np.isnan(res)] 101 | return res 102 | 103 | def negative_predictive_value(self): 104 | tp, tn, fp, fn = self.get_errors() 105 | res = tn / (tn + fn) 106 | res = res[~np.isnan(res)] 107 | return res 108 | 109 | def false_positive_rate(self): 110 | tp, tn, fp, fn = self.get_errors() 111 | res = fp / (fp + tn) 112 | res = res[~np.isnan(res)] 113 | return res 114 | 115 | def false_discovery_rate(self): 116 | tp, tn, fp, fn = self.get_errors() 117 | res = fp / (tp + fp) 118 | res = res[~np.isnan(res)] 119 | return res 120 | 121 | def F1(self): 122 | tp, tn, fp, fn = self.get_errors() 123 | res = (2*tp) / (2*tp + fp + fn) 124 | res = res[~np.isnan(res)] 125 | return res 126 | 127 | def matthews_correlation(self): 128 | tp, tn, fp, fn = self.get_errors() 129 | numerator = tp*tn - fp*fn 130 | denominator = np.sqrt((tp + fp)*(tp + fn)*(tn + fp)*(tn + fn)) 131 | res = numerator / denominator 132 | res = res[~np.isnan(res)] 133 | return res 134 | -------------------------------------------------------------------------------- /day2-Conv/convnet_exercise.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Convnet exercise\n", 8 | "\n", 9 | "In this exercise you will be implementing a (pretty slow) convnet! " 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "## Preliminaries" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": { 23 | "collapsed": true 24 | }, 25 | "outputs": [], 26 | "source": [ 27 | "import numpy as np\n", 28 | "import scipy.signal\n", 29 | "import matplotlib.pyplot as plt\n", 30 | "import matplotlib\n", 31 | "%matplotlib inline" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "## Feedforward network from yesterday's assignment\n", 39 | "\n", 40 | "We extend on the neural network from yesterday which is provided (in full) below. Just execute the cell." 
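, "\n", "\n", "All layers share the same interface: `fprop(x)` computes the forward pass and caches whatever the backward pass needs, `bprop(delta_in)` takes the gradient w.r.t. the layer output and returns the gradient w.r.t. the layer input, and `update_params(lr)` applies a gradient step. A minimal sketch of how a network is trained with this interface (the same pattern appears again in Task #4 below):\n", "\n", "```python\n", "layers = [LinearLayer(784, 64), ReluActivationLayer(),\n", "          LinearLayer(64, 10), SoftmaxActivationLayer()]\n", "loss = CrossEntropyLoss()\n", "\n", "def train_step(x, t, lr=0.1):\n", "    for layer in layers:             # forward pass\n", "        x = layer.fprop(x)\n", "    d = loss.bprop(x, t)             # gradient of the loss w.r.t. the output\n", "    for layer in reversed(layers):   # backward pass\n", "        d = layer.bprop(d)\n", "    for layer in layers:             # gradient step\n", "        layer.update_params(lr)\n", "    return loss.fprop(x, t)\n", "```"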
41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": { 47 | "collapsed": true 48 | }, 49 | "outputs": [], 50 | "source": [ 51 | "def onehot(t, num_classes):\n", 52 | " out = np.zeros((t.shape[0], num_classes))\n", 53 | " for row, col in enumerate(t):\n", 54 | " out[row, col] = 1\n", 55 | " return out\n", 56 | "\n", 57 | "def linear(x):\n", 58 | " return x\n", 59 | "\n", 60 | "def sigmoid(x):\n", 61 | " return 1 / (1 + np.exp(-x))\n", 62 | "\n", 63 | "def tanh(x):\n", 64 | " return (np.exp(x)-np.exp(-x)) / (np.exp(x)+np.exp(-x))\n", 65 | "\n", 66 | "def relu(x):\n", 67 | " return np.maximum(0, x)\n", 68 | "\n", 69 | "def softplus(x):\n", 70 | " return np.log(np.exp(x) + 1)\n", 71 | "\n", 72 | "class LinearLayer():\n", 73 | " def __init__(self, num_inputs, num_units, scale=0.01):\n", 74 | " self.num_units = num_units\n", 75 | " self.num_inputs = num_inputs\n", 76 | " self.W = np.random.normal(size=(num_inputs, num_units), scale=scale)\n", 77 | " self.b = np.zeros(num_units)\n", 78 | "\n", 79 | " def __str__(self): \n", 80 | " return \"LinearLayer(%i, %i)\" % (self.num_inputs, self.num_units)\n", 81 | "\n", 82 | " def fprop(self, x, *args):\n", 83 | " self.x = x\n", 84 | " self.a = np.dot(x, self.W) + self.b\n", 85 | " return self.a\n", 86 | " \n", 87 | " def bprop(self, delta_in):\n", 88 | " x_t = np.transpose(self.x)\n", 89 | " self.grad_W = np.dot(x_t, delta_in)\n", 90 | " self.grad_b = delta_in.sum(axis=0)\n", 91 | " W_T = np.transpose(self.W)\n", 92 | " self.delta_out = np.dot(delta_in,W_T)\n", 93 | " return self.delta_out\n", 94 | " \n", 95 | " def update_params(self, lr):\n", 96 | " self.W = self.W - self.grad_W*lr\n", 97 | " self.b = self.b - self.grad_b*lr\n", 98 | " \n", 99 | "class SigmoidActivationLayer():\n", 100 | " def __str__(self): \n", 101 | " return \"Sigmoid()\"\n", 102 | " \n", 103 | " def fprop(self, x, train=True):\n", 104 | " self.a = 1.0 / (1+np.exp(-x))\n", 105 | " return self.a\n", 106 | " \n", 107 | " def bprop(self, delta_in):\n", 108 | " delta_out = self.a * (1 - self.a)*delta_in\n", 109 | " return delta_out\n", 110 | " \n", 111 | " def update_params(self, lr):\n", 112 | " pass\n", 113 | " \n", 114 | "class TanhActivationLayer():\n", 115 | " def __str__(self): \n", 116 | " return \"Tanh()\"\n", 117 | " \n", 118 | " def fprop(self, x, train=True):\n", 119 | " self.a = (np.exp(x)-np.exp(-x)) / (np.exp(x)+np.exp(-x))\n", 120 | " return self.a\n", 121 | "\n", 122 | " def bprop(self, delta_in):\n", 123 | " delta_out = (1 - self.a**2)*delta_in # tanh'(x) = 1 - tanh(x)**2 and self.a stores tanh(x)\n", 124 | " return delta_out\n", 125 | " \n", 126 | " def update_params(self, lr):\n", 127 | " pass\n", 128 | "\n", 129 | "class ReluActivationLayer():\n", 130 | " def __str__(self): \n", 131 | " return \"ReLU()\"\n", 132 | "\n", 133 | " def fprop(self, x, train=True):\n", 134 | " self.a = np.maximum(0, x)\n", 135 | " return self.a\n", 136 | " \n", 137 | " def bprop(self, delta_in):\n", 138 | " return delta_in * (self.a > 0).astype(self.a.dtype)\n", 139 | " \n", 140 | " def update_params(self, lr):\n", 141 | " pass\n", 142 | " \n", 143 | "class SoftplusActivationLayer():\n", 144 | " def __str__(self):\n", 145 | " return \"Softplus()\"\n", 146 | " \n", 147 | " def fprop(self, x, train=True):\n", 148 | " self.a = np.log(np.exp(x) + 1)\n", 149 | " return self.a\n", 150 | " \n", 151 | " def bprop(self, delta_in):\n", 152 | " return delta_in * (1. - np.exp(-self.a)) # softplus'(x) = sigmoid(x) = 1 - exp(-softplus(x))\n", 153 | " \n", 154 | " def update_params(self, lr):\n", 155 | " pass\n",
pass\n", 156 | "\n", 157 | " \n", 158 | "class SoftmaxActivationLayer():\n", 159 | " def __str__(self): \n", 160 | " return \"Softmax()\"\n", 161 | " \n", 162 | " def fprop(self, x, train=True):\n", 163 | " x_exp = np.exp(x)\n", 164 | " normalizer = x_exp.sum(axis=-1, keepdims=True)\n", 165 | " self.a = x_exp / normalizer\n", 166 | " return self.a\n", 167 | " \n", 168 | " def bprop(self, delta_in):\n", 169 | " return delta_in\n", 170 | " \n", 171 | " def update_params(self, lr):\n", 172 | " pass\n", 173 | "\n", 174 | "class MeanSquaredLoss():\n", 175 | " def __str__(self): \n", 176 | " return \"MeanSquaredLoss()\"\n", 177 | " \n", 178 | " def fprop(self, x, t):\n", 179 | " num_batches = x.shape[0]\n", 180 | " cost = 0.5 * (x-t)**2 / num_batches\n", 181 | " return np.mean(np.sum(cost, axis=-1))\n", 182 | " \n", 183 | " def bprop(self, y, t):\n", 184 | " num_batches = y.shape[0]\n", 185 | " delta_out = (1./num_batches) * (y-t)\n", 186 | " return delta_out\n", 187 | " \n", 188 | " def update_params(self):\n", 189 | " pass\n", 190 | "\n", 191 | "class CrossEntropyLoss():\n", 192 | " def __str__(self): \n", 193 | " return \"CrossEntropyLoss()\"\n", 194 | " \n", 195 | " def fprop(self, x, t):\n", 196 | " tol = 1e-8\n", 197 | " return np.mean(np.sum(-t * np.log(x + tol), axis=-1))\n", 198 | " \n", 199 | " def bprop(self, y, t):\n", 200 | " num_batches = y.shape[0]\n", 201 | " delta_out = (1./num_batches) * (y-t)\n", 202 | " return delta_out\n", 203 | " \n", 204 | " def update_params(self):\n", 205 | " pass" 206 | ] 207 | }, 208 | { 209 | "cell_type": "markdown", 210 | "metadata": {}, 211 | "source": [ 212 | "## Load dataset" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": null, 218 | "metadata": { 219 | "collapsed": false 220 | }, 221 | "outputs": [], 222 | "source": [ 223 | "data = np.load('mnist.npz')\n", 224 | "num_classes = 10\n", 225 | "x_train = data['X_train']\n", 226 | "targets_train = data['y_train']\n", 227 | "x_train = np.reshape(x_train, (-1, 1, 28, 28))\n", 228 | "targets_train = onehot(targets_train, num_classes)\n", 229 | "\n", 230 | "mean = np.mean(x_train)\n", 231 | "std = np.std(x_train)\n", 232 | "x_train -= mean\n", 233 | "x_train /= std" 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "metadata": {}, 239 | "source": [ 240 | "## Gradient checking\n", 241 | "\n", 242 | "In order to verify the correctness of your layers, you will need to [check their gradients numerically](http://ufldl.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization). Below, we have implemented gradient checking functionality for you. Just execute the cell." 
243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "metadata": { 249 | "collapsed": false 250 | }, 251 | "outputs": [], 252 | "source": [ 253 | "def gradclose(a, b, rtol=None, atol=None):\n", 254 | " rtol = 1e-05 if rtol is None else rtol\n", 255 | " atol = 1e-08 if atol is None else atol\n", 256 | " diff = abs(a - b) - atol - rtol * (abs(a) + abs(b))\n", 257 | " is_close = np.all(diff < 0)\n", 258 | " if not is_close:\n", 259 | " denom = abs(a) + abs(b)\n", 260 | " mask = denom == 0\n", 261 | " rel_error = abs(a - b) / (denom + mask)\n", 262 | " rel_error[mask] = 0\n", 263 | " rel_error = np.max(rel_error)\n", 264 | " abs_error = np.max(abs(a - b))\n", 265 | " print('rel_error=%.4e, abs_error=%.4e, rtol=%.2e, atol=%.2e'\n", 266 | " % (rel_error, abs_error, rtol, atol))\n", 267 | " return is_close\n", 268 | "\n", 269 | "\n", 270 | "\n", 271 | "def approx_fprime(x, f, eps=None, *args):\n", 272 | " '''\n", 273 | " Central difference approximation of the gradient of a scalar function.\n", 274 | " '''\n", 275 | " if eps is None:\n", 276 | " eps = np.sqrt(np.finfo(np.float_).eps)\n", 277 | " grad = np.zeros_like(x)\n", 278 | " step = np.zeros_like(x)\n", 279 | " for idx in np.ndindex(x.shape):\n", 280 | " step[idx] = eps * max(abs(x[idx]), 1.0)\n", 281 | " grad[idx] = (f(*((x+step,) + args)) -\n", 282 | " f(*((x-step,) + args))) / (2*step[idx])\n", 283 | " step[idx] = 0.0\n", 284 | " return grad\n", 285 | "\n", 286 | "\n", 287 | "def check_grad(layer, x0, seed=1, eps=None, rtol=None, atol=None):\n", 288 | " '''\n", 289 | " Numerical gradient checking of layer bprop.\n", 290 | " '''\n", 291 | " # Check input gradient\n", 292 | " def fun(x):\n", 293 | " y = layer.fprop(x)\n", 294 | " return np.sum(y)\n", 295 | "\n", 296 | " def fun_grad(x):\n", 297 | " y = layer.fprop(x)\n", 298 | " y_grad = np.ones_like(y)\n", 299 | " x_grad = layer.bprop(y_grad)\n", 300 | " return x_grad\n", 301 | "\n", 302 | " g_approx = approx_fprime(x0, fun, eps)\n", 303 | " g_true = fun_grad(x0)\n", 304 | " if not gradclose(g_true, g_approx, rtol, atol):\n", 305 | " raise RuntimeError(\n", 306 | " 'Incorrect input gradient: \\nbprop:\\n%s\\napprox:\\n%s'\n", 307 | " % (g_true, g_approx)\n", 308 | " )\n", 309 | "\n", 310 | " # Check parameter gradients\n", 311 | " def fun(x, p_idx):\n", 312 | " param_array = layer.params()[p_idx]\n", 313 | " param_array *= 0\n", 314 | " param_array += x\n", 315 | " y = layer.fprop(x0)\n", 316 | " return np.sum(y)\n", 317 | "\n", 318 | " def fun_grad(x, p_idx):\n", 319 | " param_array = layer.params()[p_idx]\n", 320 | " param_array *= 0\n", 321 | " param_array += x\n", 322 | " out = layer.fprop(x0)\n", 323 | " y_grad = np.ones_like(out)\n", 324 | " layer.bprop(y_grad)\n", 325 | " param_grad = layer.grads()[p_idx]\n", 326 | " return param_grad\n", 327 | "\n", 328 | " for p_idx, p in enumerate(layer.params()):\n", 329 | " x = np.copy(layer.params()[p_idx])\n", 330 | " g_true = fun_grad(x, p_idx)\n", 331 | " g_approx = approx_fprime(x, fun, eps, p_idx)\n", 332 | " if not gradclose(g_true, g_approx, rtol, atol):\n", 333 | " raise RuntimeError(\n", 334 | " 'Incorrect parameter gradient: \\nbprop:\\n%s\\napprox:\\n%s'\n", 335 | " % (g_true, g_approx)\n", 336 | " )" 337 | ] 338 | }, 339 | { 340 | "cell_type": "markdown", 341 | "metadata": { 342 | "collapsed": false 343 | }, 344 | "source": [ 345 | "## Task #1: Convolution layer\n", 346 | "\n", 347 | "\n", 348 | "You should implement a 2D convolution layer by filling out the missing pieces and execute the cell. 
If the gradient check fails, you will get an error.\n", 349 | "\n", 350 | "### Bonus task:\n", 351 | "- Implement support for border modes `'full'` and `'valid'`" 352 | ] 353 | }, 354 | { 355 | "cell_type": "code", 356 | "execution_count": null, 357 | "metadata": { 358 | "collapsed": false 359 | }, 360 | "outputs": [], 361 | "source": [ 362 | "def conv_bc01(imgs, filters, padding):\n", 363 | " \n", 364 | " # For input images in RGB format the channels are color channels \n", 365 | " # for Red, Green and Blue.\n", 366 | " batch_size, n_channels_img, img_h, img_w = imgs.shape\n", 367 | " n_filters, n_channels, win_h, win_w = filters.shape\n", 368 | " pad_y, pad_x = padding\n", 369 | " if n_channels != n_channels_img:\n", 370 | " raise ValueError('Mismatch in # of channels')\n", 371 | "\n", 372 | " # Create output array\n", 373 | " out_h = (img_h - win_h + 2*pad_y) + 1\n", 374 | " out_w = (img_w - win_w + 2*pad_x) + 1\n", 375 | " out_shape = (batch_size, n_filters, out_h, out_w)\n", 376 | " out = np.zeros(out_shape)\n", 377 | "\n", 378 | " # Pad input images\n", 379 | " imgs = np.pad(imgs, ((0, 0), (0, 0), padding, padding), mode='constant')\n", 380 | "\n", 381 | " #################### YOUR TASK ###########################\n", 382 | " # Remember that the input to a convolution layer is a 4d matrix with shape \n", 383 | " # (num_batch, channels, height, width). The convolution is parameterized with weights \n", 384 | " # in a filterbank. The filterbank is a matrix of shape \n", 385 | " # (num_filters, channels, filter_height, filter_width)\n", 386 | " # If we look at a single filter it has shape (channels, filter_height, filter_width). \n", 387 | " # For each filter we convolve the c’th channel with the c’th channel of the image input.\n", 388 | " # Keep in mind that the output from the convolutional layer will be \n", 389 | " # (num_batch, num_filters, height, width). You can use scipy.signal.convolve w. arg. mode=\"valid\", \n", 390 | " # to convolve a single channel from a single filter with a single channel \n", 391 | " # from the input images. To do the convolution you need to convolve each filter with each\n", 392 | " # channel of the input.\n", 393 | " #\n", 394 | " # PSEUDO CODE:\n", 395 | " # Iterate over batches\n", 396 | " # Iterate over filters\n", 397 | " # Iterate over n_channels\n", 398 | " # Do the convolution as described above. 
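\n", "    #\n", "    # For intuition, one call of the building block (a hypothetical example, not\n", "    # part of the exercise): scipy.signal.convolve(np.ones((5, 5)), np.ones((3, 3)),\n", "    # mode=\"valid\") returns a (3, 3) array filled with 9.0 -- each output pixel\n", "    # is the sum over one 3x3 window.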
\n", 399 | " # Keep in mind that you need to accumulate the output in the out matrix.\n", 400 | " #\n", 401 | " ###################### END YOUR TASK #############################\n", 402 | " raise ValueError('Not yet implemented')\n", 403 | " \n", 404 | " return out\n", 405 | " \n", 406 | "\n", 407 | "class ConvLayer():\n", 408 | " def __init__(self, n_channels, n_filters, filter_size=5, scale=0.01,\n", 409 | " border_mode='same'):\n", 410 | " self.n_channels = n_channels\n", 411 | " self.n_filters = n_filters\n", 412 | " self.filter_size = filter_size\n", 413 | " w_shape = (n_filters, n_channels, filter_size, filter_size)\n", 414 | " self.W = np.random.normal(size=w_shape, scale=scale)\n", 415 | " self.b = np.zeros((1, n_filters, 1, 1))\n", 416 | " if border_mode == 'valid':\n", 417 | " self.padding = 0\n", 418 | " elif border_mode == 'same':\n", 419 | " self.padding = filter_size // 2\n", 420 | " elif border_mode == 'full':\n", 421 | " self.padding = filter_size - 1\n", 422 | " else:\n", 423 | " raise ValueError('Invalid border_mode: %s' % border_mode)\n", 424 | " self.padding = (self.padding, self.padding)\n", 425 | "\n", 426 | " \n", 427 | " def __str__(self): \n", 428 | " return (\"ConvLayer(%i, %i, %i)\"\n", 429 | " % (self.n_channels, self.n_filters, self.filter_size))\n", 430 | "\n", 431 | " def fprop(self, x, *args):\n", 432 | " '''\n", 433 | " Input:\n", 434 | " x: Array of shape (batch_size, n_channels, img_height, img_width)\n", 435 | " Output:\n", 436 | " Array of shape (batch_size, n_filters, out_height, out_width)\n", 437 | " '''\n", 438 | " # Store x for brop()\n", 439 | " self.x = x\n", 440 | "\n", 441 | " # Perform convolution\n", 442 | " y = conv_bc01(x, self.W, self.padding)\n", 443 | " \n", 444 | " #### YOUR TASK ######\n", 445 | " # Add bias to filters y (Hint. This is a one-liner)\n", 446 | " #####################\n", 447 | " raise ValueError('Not yet implemented')\n", 448 | " return y\n", 449 | " \n", 450 | " def bprop(self, dy):\n", 451 | " # dy.shape = (batch_size, n_filters, height, width)\n", 452 | " # Flip weights\n", 453 | " w_flipped = self.W[:, :, ::-1, ::-1]\n", 454 | " # Transpose channel/filter dimensions of weights\n", 455 | " w_tilde_flipped = np.transpose(w_flipped, (1, 0, 2, 3))\n", 456 | " # The dimensions of w_tilde_flipped is now \n", 457 | " # (n_channels, n_filters, filter_size, filter_size)\n", 458 | " \n", 459 | " ### YOUR TASK ####\n", 460 | " # Propagate gradients to x. To propagate the weights you need\n", 461 | " # to convolve the loss (dy) with the flipped W_tilde weights\n", 462 | " # Your task is to use the implemented conv function to implement \n", 463 | " # (L * w_tilde_flipped)\n", 464 | " ### END YOUR TASK\n", 465 | " raise ValueError('Not yet implemented')\n", 466 | " \n", 467 | " # Propagate gradients to weights\n", 468 | " x_padded = np.pad(self.x, (\n", 469 | " (0, 0), (0, 0), self.padding, self.padding), mode='constant')\n", 470 | " self.grad_W = np.zeros_like(self.W)\n", 471 | " \n", 472 | " \n", 473 | " #### YOUR TASK\n", 474 | " # Calculate the gradients for the weights in the filterbank. 
\n", 475 | " # This is a similar operation to the forward convolution that we implemented above.\n", 476 | " #\n", 477 | " # Remember that the x_padded is (n_batch, n_channels, height+pad, widht+pad)\n", 478 | " # and dy is shape (n_batches, n_filters, height, width)\n", 479 | " # Finally the W is shape (n_filters, n_channels, height, width)\n", 480 | " # \n", 481 | " # PSEUDO CODE\n", 482 | " # Iterate over batches\n", 483 | " # Iterate over filters\n", 484 | " # Iterate over channels\n", 485 | " # Convolve the c'th channel in the x with the f'th filter error \n", 486 | " # of dy. This corresponds to dy*X_padded{f, :, :}\n", 487 | " # Then accumulate the gradient in the c'th channels of the f'th filter.\n", 488 | " # (Note that we'll do the flip after the nested for-loop)\n", 489 | " ### END YOUR TASK\n", 490 | " \n", 491 | " \n", 492 | " self.grad_W = self.grad_W[:, :, ::-1, ::-1] # flipping\n", 493 | " raise ValueError('Not yet implemented')\n", 494 | "\n", 495 | " #### YOUR TASK\n", 496 | " # Propagate gradients to bias\n", 497 | " # In our implementation we use a single bias for each filter. Recall that \n", 498 | " # in the linear layer we had a single bias for each hidden unit and summed out\n", 499 | " # the batch dimension. In the convolutional layer we have single bias for each\n", 500 | " # filter. The bias is shared across batches and spatial position (height, width)\n", 501 | " ### END YOUR TASK\n", 502 | " self.grad_b = None \n", 503 | " raise ValueError('Not yet implemented')\n", 504 | "\n", 505 | " return dx\n", 506 | " \n", 507 | " def update_params(self, lr):\n", 508 | " self.W = self.W - self.grad_W*lr\n", 509 | " self.b = self.b - self.grad_b*lr\n", 510 | "\n", 511 | " def params(self):\n", 512 | " return self.W, self.b\n", 513 | "\n", 514 | " def grads(self):\n", 515 | " return self.grad_W, self.grad_b\n", 516 | "\n", 517 | "\n", 518 | "# Remember to try different parameters. The given parameters are chosen \n", 519 | "# as simple as possible and you may easily discover mistakes in your\n", 520 | "# code by changing the parameters.\n", 521 | "\n", 522 | "batch_size = 2\n", 523 | "n_channels = 1\n", 524 | "img_shape = (5, 5)\n", 525 | "n_filters = 2\n", 526 | "filter_size = 3\n", 527 | "\n", 528 | "# Border_modes 'full' and 'valid' are left as a bonus task.\n", 529 | "border_mode = 'same'\n", 530 | "\n", 531 | "x = np.random.normal(size=(batch_size, n_channels) + img_shape)\n", 532 | "layer = ConvLayer(n_channels=n_channels, n_filters=n_filters,\n", 533 | " filter_size=filter_size, border_mode=border_mode)\n", 534 | "\n", 535 | "check_grad(layer, x)\n", 536 | "print('Gradient check passed')" 537 | ] 538 | }, 539 | { 540 | "cell_type": "markdown", 541 | "metadata": {}, 542 | "source": [ 543 | "## Task #2: Pooling layer\n", 544 | "\n", 545 | "\n", 546 | "You should implement average pooling by fillling out the missing pieces and execute the cell. If the gradient check fails, you will get an error.\n", 547 | "\n", 548 | "### Bonus task:\n", 549 | "- Implement max pooling." 
550 | ] 551 | }, 552 | { 553 | "cell_type": "code", 554 | "execution_count": null, 555 | "metadata": { 556 | "collapsed": false 557 | }, 558 | "outputs": [], 559 | "source": [ 560 | "class PoolLayer():\n", 561 | " def __init__(self, win_size=3, stride=2):\n", 562 | " self.win_size = win_size\n", 563 | " self.stride = stride\n", 564 | " self.padding = self.win_size // 2\n", 565 | "\n", 566 | " def __str__(self): \n", 567 | " return \"PoolLayer(%i, %i)\" % (self.win_size, self.stride)\n", 568 | "\n", 569 | " def fprop(self, imgs, *args):\n", 570 | " '''\n", 571 | " Input:\n", 572 | " x: Array of shape (batch_size, n_channels, img_height, img_width)\n", 573 | " Output:\n", 574 | " Array of shape (batch_size, n_channels, out_height, out_width)\n", 575 | " '''\n", 576 | " batch_size, n_channels, img_h, img_w = imgs.shape\n", 577 | "\n", 578 | " # Store x for brop()\n", 579 | " self.imgs = imgs\n", 580 | "\n", 581 | " # Create output array\n", 582 | " out_h = (img_h - self.win_size + 2*self.padding) // self.stride + 1\n", 583 | " out_w = (img_w - self.win_size + 2*self.padding) // self.stride + 1\n", 584 | " out = np.zeros((batch_size, n_channels, out_h, out_w))\n", 585 | " \n", 586 | " # Perform average pooling\n", 587 | " imgs = imgs / self.win_size**2\n", 588 | " for b in range(batch_size):\n", 589 | " for c in range(n_channels):\n", 590 | " for y in range(out_h):\n", 591 | " for x in range(out_w):\n", 592 | " pass\n", 593 | " raise ValueError('Not yet implemented')\n", 594 | " return out\n", 595 | " \n", 596 | " def bprop(self, dy):\n", 597 | " dx = np.zeros_like(self.imgs)\n", 598 | " raise ValueError('Not yet implemented')\n", 599 | " return dx\n", 600 | "\n", 601 | " def update_params(self, lr):\n", 602 | " pass\n", 603 | "\n", 604 | " def params(self):\n", 605 | " return []\n", 606 | "\n", 607 | " def grads(self):\n", 608 | " return []\n", 609 | "\n", 610 | "# Remember to try different parameters. The given parameters are chosen \n", 611 | "# as simple as possible and you may easily discover mistakes in your\n", 612 | "# code by changing the parameters.\n", 613 | "\n", 614 | "batch_size = 1\n", 615 | "n_channels = 1\n", 616 | "img_shape = (5, 5)\n", 617 | "win_size = 3\n", 618 | "\n", 619 | "x = np.random.normal(size=(batch_size, n_channels) + img_shape)\n", 620 | "\n", 621 | "layer = PoolLayer(win_size=3, stride=2)\n", 622 | "check_grad(layer, x)\n", 623 | "print('Gradient check passed')" 624 | ] 625 | }, 626 | { 627 | "cell_type": "markdown", 628 | "metadata": {}, 629 | "source": [ 630 | "## Task #3: Flatten layer\n", 631 | "\n", 632 | "\n", 633 | "You should implement flattening such that your convnet layers can be used with a multi-layer perceptron network. Fill out the missing pieces. Gradient checking shouldn't be necessary for this task." 
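, "\n", "\n", "For example, a batch of shape `(4, 16, 7, 7)` should come out as `(4, 784)`, and `bprop` must use the stored shape to reverse the operation so the gradient can flow back into the convolutional layers."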
634 | ] 635 | }, 636 | { 637 | "cell_type": "code", 638 | "execution_count": null, 639 | "metadata": { 640 | "collapsed": true 641 | }, 642 | "outputs": [], 643 | "source": [ 644 | "class FlattenLayer():\n", 645 | " def __str__(self): \n", 646 | " return \"Flatten()\"\n", 647 | "\n", 648 | " def fprop(self, x, *args):\n", 649 | " '''\n", 650 | " Input:\n", 651 | " x: Array of shape (batch_size, n_channels, img_height, img_width)\n", 652 | " Output:\n", 653 | " Array of shape (batch_size, n_channels * img_height * img_width)\n", 654 | " '''\n", 655 | "\n", 656 | " # Store shape for brop()\n", 657 | " self.shape = x.shape\n", 658 | " raise ValueError('Not yet implemented')\n", 659 | "\n", 660 | " def bprop(self, delta_in):\n", 661 | " raise ValueError('Not yet implemented')\n", 662 | "\n", 663 | " def update_params(self, lr):\n", 664 | " pass" 665 | ] 666 | }, 667 | { 668 | "cell_type": "markdown", 669 | "metadata": {}, 670 | "source": [ 671 | "## Task #4: A pretty lousy convnet!\n", 672 | "\n", 673 | "Unfortunately, your implementation is too slow to be useful. However, as a final check of your convnet layers, you should try to train a small convnet on MNIST images.\n", 674 | "\n", 675 | "Run the code and verify that you get an accuracy above 0.2 after 150 gradient updates." 676 | ] 677 | }, 678 | { 679 | "cell_type": "code", 680 | "execution_count": null, 681 | "metadata": { 682 | "collapsed": false 683 | }, 684 | "outputs": [], 685 | "source": [ 686 | "num_samples, n_channels, img_h, img_w = x_train.shape\n", 687 | "num_hidden_units = 64\n", 688 | "num_classes = 10\n", 689 | "\n", 690 | "layers = [\n", 691 | " ConvLayer(n_channels=1, n_filters=4, filter_size=5, scale=0.1),\n", 692 | " PoolLayer(win_size=3, stride=2),\n", 693 | " ReluActivationLayer(),\n", 694 | " ConvLayer(n_channels=4, n_filters=16, filter_size=5, scale=0.1),\n", 695 | " PoolLayer(win_size=3, stride=2),\n", 696 | " ReluActivationLayer(),\n", 697 | " FlattenLayer(),\n", 698 | " LinearLayer(784, num_hidden_units, scale=0.1),\n", 699 | " ReluActivationLayer(),\n", 700 | " LinearLayer(num_hidden_units, num_classes, scale=0.1),\n", 701 | " SoftmaxActivationLayer(),\n", 702 | "]\n", 703 | "\n", 704 | "LossLayer = CrossEntropyLoss()\n", 705 | "\n", 706 | "def forward(x):\n", 707 | " for layer in layers:\n", 708 | " x = layer.fprop(x)\n", 709 | " return x\n", 710 | "\n", 711 | "def backward(y_probs, targets):\n", 712 | " d = LossLayer.bprop(y_probs, targets)\n", 713 | " for layer in reversed(layers):\n", 714 | " d = layer.bprop(d)\n", 715 | " \n", 716 | "def update(learning_rate):\n", 717 | " for layer in layers:\n", 718 | " layer.update_params(learning_rate)\n", 719 | "\n", 720 | "\n", 721 | "from confusionmatrix import ConfusionMatrix\n", 722 | "batch_size = 4\n", 723 | "num_epochs = 50\n", 724 | "learning_rate = 0.05\n", 725 | "num_samples = x_train.shape[0]\n", 726 | "num_batches = num_samples // batch_size\n", 727 | "\n", 728 | "\n", 729 | "n_updates = 0\n", 730 | "for epoch in range(num_epochs):\n", 731 | " confusion = ConfusionMatrix(num_classes)\n", 732 | " for i in range(num_batches):\n", 733 | " n_updates += 1\n", 734 | " idx = range(i*batch_size, (i+1)*batch_size)\n", 735 | " x_batch = x_train[idx]\n", 736 | " target_batch = targets_train[idx]\n", 737 | " y_probs = forward(x_batch)\n", 738 | " loss = LossLayer.fprop(y_probs, target_batch)\n", 739 | " backward(y_probs, target_batch)\n", 740 | " update(learning_rate)\n", 741 | " confusion.batch_add(target_batch.argmax(-1), y_probs.argmax(-1))\n", 742 | " \n", 743 | " if 
n_updates % 25 == 0:\n", 744 | " curr_acc = confusion.accuracy()\n", 745 | " print \"Update %i : Loss %f Train acc %f\" % (n_updates, loss, curr_acc)" 746 | ] 747 | } 748 | ], 749 | "metadata": { 750 | "kernelspec": { 751 | "display_name": "Python 2", 752 | "language": "python", 753 | "name": "python2" 754 | }, 755 | "language_info": { 756 | "codemirror_mode": { 757 | "name": "ipython", 758 | "version": 2 759 | }, 760 | "file_extension": ".py", 761 | "mimetype": "text/x-python", 762 | "name": "python", 763 | "nbconvert_exporter": "python", 764 | "pygments_lexer": "ipython2", 765 | "version": "2.7.9" 766 | } 767 | }, 768 | "nbformat": 4, 769 | "nbformat_minor": 0 770 | } 771 | -------------------------------------------------------------------------------- /day2-Conv/convnet_exercise_solution.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Convnet exercise\n", 8 | "\n", 9 | "In this exercise you will be implementing a (pretty slow) convnet! " 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "## Preliminaries" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": { 23 | "collapsed": true 24 | }, 25 | "outputs": [], 26 | "source": [ 27 | "import numpy as np\n", 28 | "import scipy.signal\n", 29 | "import matplotlib.pyplot as plt\n", 30 | "import matplotlib\n", 31 | "%matplotlib inline" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "## Feedforward network from yesterday's assignment\n", 39 | "\n", 40 | "We extend on the neural network from yesterday which is provided (in full) below. Just execute the cell." 
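, "\n", "\n", "One detail worth noting in the code below: `SoftmaxActivationLayer.bprop` simply passes `delta_in` through, because `CrossEntropyLoss.bprop` already returns the gradient of the combined softmax + cross-entropy expression: for softmax outputs $y$, one-hot targets $t$ and batch size $B$, the gradient w.r.t. the softmax *input* is $(y - t)/B$. The two classes are therefore only correct when used together."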
41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": { 47 | "collapsed": true 48 | }, 49 | "outputs": [], 50 | "source": [ 51 | "def onehot(t, num_classes):\n", 52 | " out = np.zeros((t.shape[0], num_classes))\n", 53 | " for row, col in enumerate(t):\n", 54 | " out[row, col] = 1\n", 55 | " return out\n", 56 | "\n", 57 | "def linear(x):\n", 58 | " return x\n", 59 | "\n", 60 | "def sigmoid(x):\n", 61 | " return 1 / (1 + np.exp(-x))\n", 62 | "\n", 63 | "def tanh(x):\n", 64 | " return (np.exp(x)-np.exp(-x)) / (np.exp(x)+np.exp(-x))\n", 65 | "\n", 66 | "def relu(x):\n", 67 | " return np.maximum(0, x)\n", 68 | "\n", 69 | "def softplus(x):\n", 70 | " return np.log(np.exp(x) + 1)\n", 71 | "\n", 72 | "class LinearLayer():\n", 73 | " def __init__(self, num_inputs, num_units, scale=0.01):\n", 74 | " self.num_units = num_units\n", 75 | " self.num_inputs = num_inputs\n", 76 | " self.W = np.random.normal(size=(num_inputs, num_units), scale=scale)\n", 77 | " self.b = np.zeros(num_units)\n", 78 | "\n", 79 | " def __str__(self): \n", 80 | " return \"LinearLayer(%i, %i)\" % (self.num_inputs, self.num_units)\n", 81 | "\n", 82 | " def fprop(self, x, *args):\n", 83 | " self.x = x\n", 84 | " self.a = np.dot(x, self.W) + self.b\n", 85 | " return self.a\n", 86 | " \n", 87 | " def bprop(self, delta_in):\n", 88 | " x_t = np.transpose(self.x)\n", 89 | " self.grad_W = np.dot(x_t, delta_in)\n", 90 | " self.grad_b = delta_in.sum(axis=0)\n", 91 | " W_T = np.transpose(self.W)\n", 92 | " self.delta_out = np.dot(delta_in,W_T)\n", 93 | " return self.delta_out\n", 94 | " \n", 95 | " def update_params(self, lr):\n", 96 | " self.W = self.W - self.grad_W*lr\n", 97 | " self.b = self.b - self.grad_b*lr\n", 98 | " \n", 99 | "class SigmoidActivationLayer():\n", 100 | " def __str__(self): \n", 101 | " return \"Sigmoid()\"\n", 102 | " \n", 103 | " def fprop(self, x, train=True):\n", 104 | " self.a = 1.0 / (1+np.exp(-x))\n", 105 | " return self.a\n", 106 | " \n", 107 | " def bprop(self, delta_in):\n", 108 | " delta_out = self.a * (1 - self.a)*delta_in\n", 109 | " return delta_out\n", 110 | " \n", 111 | " def update_params(self, lr):\n", 112 | " pass\n", 113 | " \n", 114 | "class TanhActivationLayer():\n", 115 | " def __str__(self): \n", 116 | " return \"Tanh()\"\n", 117 | " \n", 118 | " def fprop(self, x, train=True):\n", 119 | " self.a = (np.exp(x)-np.exp(-x)) / (np.exp(x)+np.exp(-x))\n", 120 | " return self.a\n", 121 | "\n", 122 | " def bprop(self, delta_in):\n", 123 | " delta_out = (1 - self.a**2)*delta_in # tanh'(x) = 1 - tanh(x)**2 and self.a stores tanh(x)\n", 124 | " return delta_out\n", 125 | " \n", 126 | " def update_params(self, lr):\n", 127 | " pass\n", 128 | "\n", 129 | "class ReluActivationLayer():\n", 130 | " def __str__(self): \n", 131 | " return \"ReLU()\"\n", 132 | "\n", 133 | " def fprop(self, x, train=True):\n", 134 | " self.a = np.maximum(0, x)\n", 135 | " return self.a\n", 136 | " \n", 137 | " def bprop(self, delta_in):\n", 138 | " return delta_in * (self.a > 0).astype(self.a.dtype)\n", 139 | " \n", 140 | " def update_params(self, lr):\n", 141 | " pass\n", 142 | " \n", 143 | "class SoftplusActivationLayer():\n", 144 | " def __str__(self):\n", 145 | " return \"Softplus()\"\n", 146 | " \n", 147 | " def fprop(self, x, train=True):\n", 148 | " self.a = np.log(np.exp(x) + 1)\n", 149 | " return self.a\n", 150 | " \n", 151 | " def bprop(self, delta_in):\n", 152 | " return delta_in * (1. - np.exp(-self.a)) # softplus'(x) = sigmoid(x) = 1 - exp(-softplus(x))\n", 153 | " \n", 154 | " def update_params(self, lr):\n", 155 | " pass\n",
pass\n", 156 | "\n", 157 | " \n", 158 | "class SoftmaxActivationLayer():\n", 159 | " def __str__(self): \n", 160 | " return \"Softmax()\"\n", 161 | " \n", 162 | " def fprop(self, x, train=True):\n", 163 | " x_exp = np.exp(x)\n", 164 | " normalizer = x_exp.sum(axis=-1, keepdims=True)\n", 165 | " self.a = x_exp / normalizer\n", 166 | " return self.a\n", 167 | " \n", 168 | " def bprop(self, delta_in):\n", 169 | " return delta_in\n", 170 | " \n", 171 | " def update_params(self, lr):\n", 172 | " pass\n", 173 | "\n", 174 | "class MeanSquaredLoss():\n", 175 | " def __str__(self): \n", 176 | " return \"MeanSquaredLoss()\"\n", 177 | " \n", 178 | " def fprop(self, x, t):\n", 179 | " num_batches = x.shape[0]\n", 180 | " cost = 0.5 * (x-t)**2 / num_batches\n", 181 | " return np.mean(np.sum(cost, axis=-1))\n", 182 | " \n", 183 | " def bprop(self, y, t):\n", 184 | " num_batches = y.shape[0]\n", 185 | " delta_out = (1./num_batches) * (y-t)\n", 186 | " return delta_out\n", 187 | " \n", 188 | " def update_params(self):\n", 189 | " pass\n", 190 | "\n", 191 | "class CrossEntropyLoss():\n", 192 | " def __str__(self): \n", 193 | " return \"CrossEntropyLoss()\"\n", 194 | " \n", 195 | " def fprop(self, x, t):\n", 196 | " tol = 1e-8\n", 197 | " return np.mean(np.sum(-t * np.log(x + tol), axis=-1))\n", 198 | " \n", 199 | " def bprop(self, y, t):\n", 200 | " num_batches = y.shape[0]\n", 201 | " delta_out = (1./num_batches) * (y-t)\n", 202 | " return delta_out\n", 203 | " \n", 204 | " def update_params(self):\n", 205 | " pass" 206 | ] 207 | }, 208 | { 209 | "cell_type": "markdown", 210 | "metadata": {}, 211 | "source": [ 212 | "## Load dataset" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": null, 218 | "metadata": { 219 | "collapsed": false 220 | }, 221 | "outputs": [], 222 | "source": [ 223 | "data = np.load('mnist.npz')\n", 224 | "num_classes = 10\n", 225 | "x_train = data['X_train']\n", 226 | "targets_train = data['y_train']\n", 227 | "x_train = np.reshape(x_train, (-1, 1, 28, 28))\n", 228 | "targets_train = onehot(targets_train, num_classes)\n", 229 | "\n", 230 | "mean = np.mean(x_train)\n", 231 | "std = np.std(x_train)\n", 232 | "x_train -= mean\n", 233 | "x_train /= std" 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "metadata": {}, 239 | "source": [ 240 | "## Gradient checking\n", 241 | "\n", 242 | "In order to verify the correctness of your layers, you will need to [check their gradients numerically](http://ufldl.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization). Below, we have implemented gradient checking functionality for you. Just execute the cell." 
243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "metadata": { 249 | "collapsed": false 250 | }, 251 | "outputs": [], 252 | "source": [ 253 | "def gradclose(a, b, rtol=None, atol=None):\n", 254 | " rtol = 1e-05 if rtol is None else rtol\n", 255 | " atol = 1e-08 if atol is None else atol\n", 256 | " diff = abs(a - b) - atol - rtol * (abs(a) + abs(b))\n", 257 | " is_close = np.all(diff < 0)\n", 258 | " if not is_close:\n", 259 | " denom = abs(a) + abs(b)\n", 260 | " mask = denom == 0\n", 261 | " rel_error = abs(a - b) / (denom + mask)\n", 262 | " rel_error[mask] = 0\n", 263 | " rel_error = np.max(rel_error)\n", 264 | " abs_error = np.max(abs(a - b))\n", 265 | " print('rel_error=%.4e, abs_error=%.4e, rtol=%.2e, atol=%.2e'\n", 266 | " % (rel_error, abs_error, rtol, atol))\n", 267 | " return is_close\n", 268 | "\n", 269 | "\n", 270 | "\n", 271 | "def approx_fprime(x, f, eps=None, *args):\n", 272 | " '''\n", 273 | " Central difference approximation of the gradient of a scalar function.\n", 274 | " '''\n", 275 | " if eps is None:\n", 276 | " eps = np.sqrt(np.finfo(np.float_).eps)\n", 277 | " grad = np.zeros_like(x)\n", 278 | " step = np.zeros_like(x)\n", 279 | " for idx in np.ndindex(x.shape):\n", 280 | " step[idx] = eps * max(abs(x[idx]), 1.0)\n", 281 | " grad[idx] = (f(*((x+step,) + args)) -\n", 282 | " f(*((x-step,) + args))) / (2*step[idx])\n", 283 | " step[idx] = 0.0\n", 284 | " return grad\n", 285 | "\n", 286 | "\n", 287 | "def check_grad(layer, x0, seed=1, eps=None, rtol=None, atol=None):\n", 288 | " '''\n", 289 | " Numerical gradient checking of layer bprop.\n", 290 | " '''\n", 291 | " # Check input gradient\n", 292 | " def fun(x):\n", 293 | " y = layer.fprop(x)\n", 294 | " return np.sum(y)\n", 295 | "\n", 296 | " def fun_grad(x):\n", 297 | " y = layer.fprop(x)\n", 298 | " y_grad = np.ones_like(y)\n", 299 | " x_grad = layer.bprop(y_grad)\n", 300 | " return x_grad\n", 301 | "\n", 302 | " g_approx = approx_fprime(x0, fun, eps)\n", 303 | " g_true = fun_grad(x0)\n", 304 | " if not gradclose(g_true, g_approx, rtol, atol):\n", 305 | " raise RuntimeError(\n", 306 | " 'Incorrect input gradient: \\nbprop:\\n%s\\napprox:\\n%s'\n", 307 | " % (g_true, g_approx)\n", 308 | " )\n", 309 | "\n", 310 | " # Check parameter gradients\n", 311 | " def fun(x, p_idx):\n", 312 | " param_array = layer.params()[p_idx]\n", 313 | " param_array *= 0\n", 314 | " param_array += x\n", 315 | " y = layer.fprop(x0)\n", 316 | " return np.sum(y)\n", 317 | "\n", 318 | " def fun_grad(x, p_idx):\n", 319 | " param_array = layer.params()[p_idx]\n", 320 | " param_array *= 0\n", 321 | " param_array += x\n", 322 | " out = layer.fprop(x0)\n", 323 | " y_grad = np.ones_like(out)\n", 324 | " layer.bprop(y_grad)\n", 325 | " param_grad = layer.grads()[p_idx]\n", 326 | " return param_grad\n", 327 | "\n", 328 | " for p_idx, p in enumerate(layer.params()):\n", 329 | " x = np.copy(layer.params()[p_idx])\n", 330 | " g_true = fun_grad(x, p_idx)\n", 331 | " g_approx = approx_fprime(x, fun, eps, p_idx)\n", 332 | " if not gradclose(g_true, g_approx, rtol, atol):\n", 333 | " raise RuntimeError(\n", 334 | " 'Incorrect parameter gradient: \\nbprop:\\n%s\\napprox:\\n%s'\n", 335 | " % (g_true, g_approx)\n", 336 | " )" 337 | ] 338 | }, 339 | { 340 | "cell_type": "markdown", 341 | "metadata": { 342 | "collapsed": false 343 | }, 344 | "source": [ 345 | "## Task #1: Convolution layer\n", 346 | "\n", 347 | "\n", 348 | "You should implement a 2D convolution layer by filling out the missing pieces and execute the cell. 
If the gradient check fails, you will get an error.\n", 349 | "\n", 350 | "### Bonus task:\n", 351 | "- Implement support for border modes `'full'` and `'valid'`" 352 | ] 353 | }, 354 | { 355 | "cell_type": "code", 356 | "execution_count": null, 357 | "metadata": { 358 | "collapsed": false 359 | }, 360 | "outputs": [], 361 | "source": [ 362 | "def conv_bc01(imgs, filters, padding):\n", 363 | " batch_size, n_channels_img, img_h, img_w = imgs.shape\n", 364 | " n_filters, n_channels, win_h, win_w = filters.shape\n", 365 | " pad_y, pad_x = padding\n", 366 | " if n_channels != n_channels_img:\n", 367 | " raise ValueError('Mismatch in # of channels')\n", 368 | "\n", 369 | " # Create output array\n", 370 | " out_h = (img_h - win_h + 2*pad_y) + 1\n", 371 | " out_w = (img_w - win_w + 2*pad_x) + 1\n", 372 | " out_shape = (batch_size, n_filters, out_h, out_w)\n", 373 | " out = np.zeros(out_shape)\n", 374 | "\n", 375 | " # Pad input images\n", 376 | " imgs = np.pad(imgs, ((0, 0), (0, 0), padding, padding), mode='constant')\n", 377 | "\n", 378 | " # Perform convolution\n", 379 | " for b in range(batch_size):\n", 380 | " for f in range(n_filters):\n", 381 | " for c in range(n_channels):\n", 382 | " out[b, f] += scipy.signal.convolve(imgs[b, c], filters[f, c], mode='valid')\n", 383 | " return out\n", 384 | " \n", 385 | "\n", 386 | "class ConvLayer():\n", 387 | " def __init__(self, n_channels, n_filters, filter_size=5, scale=0.01,\n", 388 | " border_mode='same'):\n", 389 | " self.n_channels = n_channels\n", 390 | " self.n_filters = n_filters\n", 391 | " self.filter_size = filter_size\n", 392 | " w_shape = (n_filters, n_channels, filter_size, filter_size)\n", 393 | " self.W = np.random.normal(size=w_shape, scale=scale)\n", 394 | " self.b = np.zeros((1, n_filters, 1, 1))\n", 395 | " if border_mode == 'valid':\n", 396 | " self.padding = 0\n", 397 | " elif border_mode == 'same':\n", 398 | " self.padding = filter_size // 2\n", 399 | " elif border_mode == 'full':\n", 400 | " self.padding = filter_size - 1\n", 401 | " else:\n", 402 | " raise ValueError('Invalid border_mode: %s' % border_mode)\n", 403 | " self.padding = (self.padding, self.padding)\n", 404 | "\n", 405 | " \n", 406 | " def __str__(self): \n", 407 | " return (\"ConvLayer(%i, %i, %i)\"\n", 408 | " % (self.n_channels, self.n_filters, self.filter_size))\n", 409 | "\n", 410 | " def fprop(self, x, *args):\n", 411 | " '''\n", 412 | " Input:\n", 413 | " x: Array of shape (batch_size, n_channels, img_height, img_width)\n", 414 | " Output:\n", 415 | " Array of shape (batch_size, n_filters, out_height, out_width)\n", 416 | " '''\n", 417 | " # Store x for brop()\n", 418 | " self.x = x\n", 419 | "\n", 420 | " # Perform convolution\n", 421 | " y = conv_bc01(x, self.W, self.padding)\n", 422 | " \n", 423 | " # Add bias\n", 424 | " y = y + self.b\n", 425 | " return y\n", 426 | " \n", 427 | " def bprop(self, dy):\n", 428 | " # Flip weights\n", 429 | " w = self.W[:, :, ::-1, ::-1]\n", 430 | " # Transpose channel/filter dimensions of weights\n", 431 | " w = np.transpose(w, (1, 0, 2, 3))\n", 432 | "\n", 433 | " # Propagate gradients to x\n", 434 | " dx = conv_bc01(dy, w, self.padding)\n", 435 | " \n", 436 | " # Propagate gradients to weights\n", 437 | " x = np.pad(self.x, ((0, 0), (0, 0), self.padding, self.padding), mode='constant')\n", 438 | "\n", 439 | " self.grad_W = np.zeros_like(self.W)\n", 440 | " for b in range(dy.shape[0]):\n", 441 | " for f in range(self.W.shape[0]):\n", 442 | " for c in range(self.W.shape[1]):\n", 443 | " self.grad_W[f, c] += 
scipy.signal.convolve(x[b, c], dy[b, f], mode='valid')\n", 444 | " self.grad_W = self.grad_W[:, :, ::-1, ::-1]\n", 445 | "\n", 446 | " # Propagate gradients to bias\n", 447 | " self.grad_b = np.sum(dy, keepdims=True, axis=(0, 2, 3))\n", 448 | " return dx\n", 449 | " \n", 450 | " def update_params(self, lr):\n", 451 | " self.W = self.W - self.grad_W*lr\n", 452 | " self.b = self.b - self.grad_b*lr\n", 453 | "\n", 454 | " def params(self):\n", 455 | " return self.W, self.b\n", 456 | "\n", 457 | " def grads(self):\n", 458 | " return self.grad_W, self.grad_b\n", 459 | "\n", 460 | "\n", 461 | "# Remember to try different parameters. The given parameters are chosen \n", 462 | "# as simple as possible and you may easily discover mistakes in your\n", 463 | "# code by changing the parameters.\n", 464 | "\n", 465 | "batch_size = 2\n", 466 | "n_channels = 1\n", 467 | "img_shape = (5, 5)\n", 468 | "n_filters = 2\n", 469 | "filter_size = 3\n", 470 | "\n", 471 | "# Border_modes 'full' and 'valid' are left as a bonus task.\n", 472 | "border_mode = 'same'\n", 473 | "\n", 474 | "x = np.random.normal(size=(batch_size, n_channels) + img_shape)\n", 475 | "layer = ConvLayer(n_channels=n_channels, n_filters=n_filters,\n", 476 | " filter_size=filter_size, border_mode=border_mode)\n", 477 | "\n", 478 | "check_grad(layer, x)\n", 479 | "print('Gradient check passed')" 480 | ] 481 | }, 482 | { 483 | "cell_type": "markdown", 484 | "metadata": {}, 485 | "source": [ 486 | "## Task #2: Pooling layer\n", 487 | "\n", 488 | "\n", 489 | "You should implement average pooling by filling out the missing pieces and execute the cell. If the gradient check fails, you will get an error.\n", 490 | "\n", 491 | "### Bonus task:\n", 492 | "- Implement max pooling." 493 | ] 494 | }, 495 | { 496 | "cell_type": "code", 497 | "execution_count": null, 498 | "metadata": { 499 | "collapsed": false 500 | }, 501 | "outputs": [], 502 | "source": [ 503 | "class PoolLayer():\n", 504 | " def __init__(self, win_size=3, stride=2):\n", 505 | " self.win_size = win_size\n", 506 | " self.stride = stride\n", 507 | " self.padding = self.win_size // 2\n", 508 | "\n", 509 | " def __str__(self): \n", 510 | " return \"PoolLayer(%i, %i)\" % (self.win_size, self.stride)\n", 511 | "\n", 512 | " def fprop(self, imgs, *args):\n", 513 | " '''\n", 514 | " Input:\n", 515 | " x: Array of shape (batch_size, n_channels, img_height, img_width)\n", 516 | " Output:\n", 517 | " Array of shape (batch_size, n_channels, out_height, out_width)\n", 518 | " '''\n", 519 | " batch_size, n_channels, img_h, img_w = imgs.shape\n", 520 | "\n", 521 | " # Store x for bprop()\n", 522 | " self.imgs = imgs\n", 523 | "\n", 524 | " # Create output array\n", 525 | " out_h = (img_h - self.win_size + 2*self.padding) // self.stride + 1\n", 526 | " out_w = (img_w - self.win_size + 2*self.padding) // self.stride + 1\n", 527 | " out = np.zeros((batch_size, n_channels, out_h, out_w))\n", 528 | " \n", 529 | " # Perform average pooling\n", 530 | " imgs = imgs / self.win_size**2\n", 531 | " for b in range(batch_size):\n", 532 | " for c in range(n_channels):\n", 533 | " for y in range(out_h):\n", 534 | " y_ = y * self.stride\n", 535 | " for x in range(out_w):\n", 536 | " x_ = x * self.stride\n", 537 | " win = imgs[b, c, max(y_, 0):y_+self.win_size,\n", 538 | " max(x_, 0):x_+self.win_size]\n", 539 | " out[b, c, y, x] = np.sum(win)\n", 540 | " return out\n", 541 | " \n", 542 | " def bprop(self, dy):\n", 543 | " dx = np.zeros_like(self.imgs)\n", 544 | " dy = dy / self.win_size**2\n", 545 | " for i in 
range(dx.shape[0]):\n", 546 | " for c in range(dx.shape[1]):\n", 547 | " for y in range(dy.shape[2]):\n", 548 | " y_ = y * self.stride\n", 549 | " for x in range(dy.shape[3]):\n", 550 | " x_ = x * self.stride\n", 551 | " dx[i, c, y_:y_+self.win_size, x_:x_+self.win_size] += dy[i, c, y, x]\n", 552 | " return dx\n", 553 | "\n", 554 | " def update_params(self, lr):\n", 555 | " pass\n", 556 | "\n", 557 | " def params(self):\n", 558 | " return []\n", 559 | "\n", 560 | " def grads(self):\n", 561 | " return []\n", 562 | "\n", 563 | "# Remember to try different parameters. The given parameters are chosen \n", 564 | "# as simple as possible and you may easily discover mistakes in your\n", 565 | "# code by changing the parameters.\n", 566 | "\n", 567 | "batch_size = 1\n", 568 | "n_channels = 1\n", 569 | "img_shape = (5, 5)\n", 570 | "win_size = 3\n", 571 | "\n", 572 | "x = np.random.normal(size=(batch_size, n_channels) + img_shape)\n", 573 | "\n", 574 | "layer = PoolLayer(win_size=3, stride=2)\n", 575 | "check_grad(layer, x)\n", 576 | "print('Gradient check passed')" 577 | ] 578 | }, 579 | { 580 | "cell_type": "markdown", 581 | "metadata": {}, 582 | "source": [ 583 | "## Task #3: Flatten layer\n", 584 | "\n", 585 | "\n", 586 | "You should implement flattening such that your convnet layers can be used with a multi-layer perceptron network. Fill out the missing pieces. Gradient checking shouldn't be necessary for this task." 587 | ] 588 | }, 589 | { 590 | "cell_type": "code", 591 | "execution_count": null, 592 | "metadata": { 593 | "collapsed": true 594 | }, 595 | "outputs": [], 596 | "source": [ 597 | "class FlattenLayer():\n", 598 | " def __str__(self): \n", 599 | " return \"Flatten()\"\n", 600 | "\n", 601 | " def fprop(self, x, *args):\n", 602 | " '''\n", 603 | " Input:\n", 604 | " x: Array of shape (batch_size, n_channels, img_height, img_width)\n", 605 | " Output:\n", 606 | " Array of shape (batch_size, n_channels * img_height * img_width)\n", 607 | " '''\n", 608 | "\n", 609 | " # Store shape for brop()\n", 610 | " self.shape = x.shape\n", 611 | " y = np.reshape(x, (x.shape[0], -1))\n", 612 | " return y\n", 613 | "\n", 614 | " def bprop(self, delta_in):\n", 615 | " return np.reshape(delta_in, self.shape)\n", 616 | "\n", 617 | " def update_params(self, lr):\n", 618 | " pass" 619 | ] 620 | }, 621 | { 622 | "cell_type": "markdown", 623 | "metadata": {}, 624 | "source": [ 625 | "## Task #4: A pretty lousy convnet!\n", 626 | "\n", 627 | "Unfortunately, your implementation is too slow to be useful. However, as a final check of your convnet layers, you should try to train a small convnet on MNIST images.\n", 628 | "\n", 629 | "Run the code and verify that you get an accuracy above 0.2 after 150 gradient updates." 
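, "\n", "\n", "(Why the first `LinearLayer` takes 784 inputs: with `border_mode='same'` the convolutions preserve the spatial size, and each pooling layer with `win_size=3`, `stride=2` and padding $1$ maps a side length $h$ to $\\lfloor (h - 3 + 2)/2 \\rfloor + 1$, i.e. $28 \\rightarrow 14 \\rightarrow 7$. Flattening 16 filters of size $7 \\times 7$ therefore gives $16 \\cdot 7 \\cdot 7 = 784$ features.)"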
630 | ] 631 | }, 632 | { 633 | "cell_type": "code", 634 | "execution_count": null, 635 | "metadata": { 636 | "collapsed": false 637 | }, 638 | "outputs": [], 639 | "source": [ 640 | "num_samples, n_channels, img_h, img_w = x_train.shape\n", 641 | "num_hidden_units = 64\n", 642 | "num_classes = 10\n", 643 | "\n", 644 | "layers = [\n", 645 | " ConvLayer(n_channels=1, n_filters=4, filter_size=5, scale=0.1),\n", 646 | " PoolLayer(win_size=3, stride=2),\n", 647 | " ReluActivationLayer(),\n", 648 | " ConvLayer(n_channels=4, n_filters=16, filter_size=5, scale=0.1),\n", 649 | " PoolLayer(win_size=3, stride=2),\n", 650 | " ReluActivationLayer(),\n", 651 | " FlattenLayer(),\n", 652 | " LinearLayer(784, num_hidden_units, scale=0.1),\n", 653 | " ReluActivationLayer(),\n", 654 | " LinearLayer(num_hidden_units, num_classes, scale=0.1),\n", 655 | " SoftmaxActivationLayer(),\n", 656 | "]\n", 657 | "\n", 658 | "LossLayer = CrossEntropyLoss()\n", 659 | "\n", 660 | "def forward(x):\n", 661 | " for layer in layers:\n", 662 | " x = layer.fprop(x)\n", 663 | " return x\n", 664 | "\n", 665 | "def backward(y_probs, targets):\n", 666 | " d = LossLayer.bprop(y_probs, targets)\n", 667 | " for layer in reversed(layers):\n", 668 | " d = layer.bprop(d)\n", 669 | " \n", 670 | "def update(learning_rate):\n", 671 | " for layer in layers:\n", 672 | " layer.update_params(learning_rate)\n", 673 | "\n", 674 | "\n", 675 | "from confusionmatrix import ConfusionMatrix\n", 676 | "batch_size = 4\n", 677 | "num_epochs = 50\n", 678 | "learning_rate = 0.05\n", 679 | "num_samples = x_train.shape[0]\n", 680 | "num_batches = num_samples // batch_size\n", 681 | "\n", 682 | "\n", 683 | "n_updates = 0\n", 684 | "for epoch in range(num_epochs):\n", 685 | " confusion = ConfusionMatrix(num_classes)\n", 686 | " for i in range(num_batches):\n", 687 | " n_updates += 1\n", 688 | " idx = range(i*batch_size, (i+1)*batch_size)\n", 689 | " x_batch = x_train[idx]\n", 690 | " target_batch = targets_train[idx]\n", 691 | " y_probs = forward(x_batch)\n", 692 | " loss = LossLayer.fprop(y_probs, target_batch)\n", 693 | " backward(y_probs, target_batch)\n", 694 | " update(learning_rate)\n", 695 | " confusion.batch_add(target_batch.argmax(-1), y_probs.argmax(-1))\n", 696 | " \n", 697 | " if n_updates % 25 == 0:\n", 698 | " curr_acc = confusion.accuracy()\n", 699 | " print \"Update %i : Loss %f Train acc %f\" % (n_updates, loss, curr_acc)" 700 | ] 701 | } 702 | ], 703 | "metadata": { 704 | "kernelspec": { 705 | "display_name": "Python 2", 706 | "language": "python", 707 | "name": "python2" 708 | }, 709 | "language_info": { 710 | "codemirror_mode": { 711 | "name": "ipython", 712 | "version": 2 713 | }, 714 | "file_extension": ".py", 715 | "mimetype": "text/x-python", 716 | "name": "python", 717 | "nbconvert_exporter": "python", 718 | "pygments_lexer": "ipython2", 719 | "version": "2.7.10" 720 | } 721 | }, 722 | "nbformat": 4, 723 | "nbformat_minor": 0 724 | } 725 | -------------------------------------------------------------------------------- /day2-Conv/mnist.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day2-Conv/mnist.npz -------------------------------------------------------------------------------- /day3-RNN/README.md: -------------------------------------------------------------------------------- 1 | # day3-RNN 2 | -------------------------------------------------------------------------------- 
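For orientation, here is a hypothetical usage sketch of the data generator defined in `data_generator.py` below (Python 2, like the rest of the repo). The calls and return values are taken from that file; the shapes printed depend on the sampled digit lengths.

```python
from data_generator import get_batch, print_valid_characters

print_valid_characters()

# Each target is a digit string terminated by the stop character '#',
# e.g. '47#'; the corresponding input is its spelled-out text, 'four seven'.
inputs, input_masks, targets, target_masks, text_in, text_tar = \
    get_batch(batch_size=3, min_digits=2, max_digits=4)

print "inputs:", inputs.shape            # (3, max input length), int32 indices
print "input_masks:", input_masks.shape  # same shape; 1.0 on data, 0.0 on padding
print "example input: ", text_in[0]
print "example target:", text_tar[0]
```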
/day3-RNN/data_generator.py:
--------------------------------------------------------------------------------
1 | __author__ = 'casperkaae'
2 | import numpy as np
3 | 
4 | target_to_text = {
5 |     '0':'zero',
6 |     '1':'one',
7 |     '2':'two',
8 |     '3':'three',
9 |     '4':'four',
10 |     '5':'five',
11 |     '6':'six',
12 |     '7':'seven',
13 |     '8':'eight',
14 |     '9':'nine',
15 | }
16 | 
17 | stop_character = '#'
18 | 
19 | input_characters = " ".join(target_to_text.values())
20 | valid_characters = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '#'] + \
21 |     list(set(input_characters))
22 | 
23 | def print_valid_characters():
24 |     l = ''
25 |     for i,c in enumerate(valid_characters):
26 |         l += "\'%s\'=%i,\t" % (c,i)
27 |     print "Number of valid characters:", len(valid_characters)
28 |     print l
29 | 
30 | ninput_chars = len(valid_characters)
31 | def get_batch(batch_size=100, min_digits = 3, max_digits=3):
32 |     '''
33 |     Generates random sequences of digits and translates them to text, e.g. 1->'one'.
34 |     :param batch_size: number of samples to return
35 |     :param min_digits: minimum length of target
36 |     :param max_digits: maximum length of target
37 |     '''
38 |     text_inputs = []
39 |     int_inputs = []
40 |     text_targets = []
41 |     int_targets = []
42 |     for i in range(batch_size):
43 |         #generate a random sequence of digits as the target text
44 |         tar_len = np.random.randint(min_digits,max_digits+1)
45 |         text_target = "".join(map(str,np.random.randint(0,10,tar_len))) + stop_character
46 |         inp_str = text_target[:-1]
47 | 
48 |         #generate the targets as a list of integers
49 |         int_target = map(lambda c: valid_characters.index(c), text_target)
50 | 
51 |         #generate the text input
52 |         text_input = " ".join(map(lambda k: target_to_text[k], inp_str))
53 |         #generate the inputs as a list of integers
54 |         int_input = map(lambda c: valid_characters.index(c), text_input)
55 | 
56 |         text_inputs.append(text_input)
57 |         int_inputs.append(int_input)
58 |         text_targets.append(text_target)
59 |         int_targets.append(int_target)
60 | 
61 |     #create the input matrix and mask - note that we zero pad the shorter sequences.
62 | max_input_len = max(map(len,int_inputs)) 63 | inputs = np.zeros((batch_size,max_input_len)) 64 | input_masks = np.zeros((batch_size,max_input_len)) 65 | for (i,inp) in enumerate(int_inputs): 66 | cur_len = len(inp) 67 | inputs[i,:cur_len] = inp 68 | input_masks[i,:cur_len] = 1 69 | 70 | targets = np.zeros((batch_size,max_digits+1)) #+1 to allow space for stop character 71 | target_masks = np.zeros((batch_size,max_digits+1)) #+1 to allow space for stop character 72 | for (i,tar) in enumerate(int_targets): 73 | cur_len = len(tar) 74 | targets[i,:cur_len] = tar 75 | target_masks[i,:cur_len] = 1 76 | 77 | return inputs.astype('int32'), \ 78 | input_masks.astype('float32'), \ 79 | targets.astype('int32'), \ 80 | target_masks.astype('float32'), \ 81 | text_inputs, \ 82 | text_targets 83 | 84 | -------------------------------------------------------------------------------- /day3-RNN/decoder_attention.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import theano 3 | import theano.tensor as T 4 | from lasagne import nonlinearities 5 | from lasagne import init 6 | from lasagne.utils import unroll_scan 7 | from lasagne.layers import MergeLayer 8 | from lasagne.layers.base import Layer 9 | from lasagne.layers import helper 10 | 11 | import numpy as np 12 | import theano 13 | import theano.tensor as T 14 | import lasagne.init as init 15 | import lasagne.nonlinearities as nonlinearities 16 | 17 | from lasagne.layers import Layer 18 | import lasagne 19 | 20 | 21 | # LSTMAttentionDecodeLayer 22 | # Model: Encoder -> Decoder Decoder-LSTM: ... hid_t-1 -> hid_t -> h_t+1 .... 23 | # attention_network | | | 24 | # weighted encoder hidden(output) whid_t-1 -> whid_t -> wh_t+1 25 | 26 | 27 | # LSTMAttentionDecodeFeedBackLayer 28 | # Model: Encoder -> Decoder Decoder-LSTM: ... hid_dec_t-1-> hid_dec_t-> hid_dec_t+1 .... 29 | # | /^ | /^ | /^ 30 | # attention_network | / | / | / 31 | # | / |/ | / 32 | # weighted encoder hidden(output) whid_enc_t-1 -> whid_enc__t -> wh_t+1 33 | # 34 | # This model also allows for adden "pre-steps" to the decoder where the model can 35 | # "comprehend the input data". basically this is just adding extra steps to the 36 | # decoder before producing the targets 37 | # 38 | # 39 | 40 | 41 | class LSTMAttentionDecodeLayer(MergeLayer): 42 | r"""A long short-term memory (LSTM) layer. 43 | 44 | Includes optional "peephole connections" and a forget gate. Based on the 45 | definition in [1]_, which is the current common definition. The output is 46 | computed by 47 | 48 | .. math :: 49 | 50 | i_t &= \sigma_i(W_{xi}x_t + W_{hi}h_{t-1} 51 | + w_{ci}\odot c_{t-1} + b_i)\\ 52 | f_t &= \sigma_f(W_{xf}x_t + W_{hf}h_{t-1} 53 | + w_{cf}\odot c_{t-1} + b_f)\\ 54 | c_t &= f_t \odot c_{t - 1} 55 | + i_t\sigma_c(W_{xc}x_t + W_{hc} h_{t-1} + b_c)\\ 56 | o_t &= \sigma_o(W_{xo}x_t + W_{ho}h_{t-1} + w_{co}\odot c_t + b_o)\\ 57 | h_t &= o_t \odot \sigma_h(c_t) 58 | 59 | Parameters 60 | ---------- 61 | incoming : a :class:`lasagne.layers.Layer` instance or a tuple 62 | The layer feeding into this layer, or the expected input shape. 63 | num_units : int 64 | Number of hidden/cell units in the layer. 65 | W_in_to_ingate : Theano shared variable, numpy array or callable 66 | Initializer for input-to-input gate weight matrix (:math:`W_{xi}`). 67 | W_hid_to_ingate : Theano shared variable, numpy array or callable 68 | Initializer for hidden-to-input gate weight matrix (:math:`W_{hi}`). 
69 | W_cell_to_ingate : Theano shared variable, numpy array or callable 70 | Initializer for cell-to-input gate weight vector (:math:`w_{ci}`). 71 | b_ingate : Theano shared variable, numpy array or callable 72 | Initializer for input gate bias vector (:math:`b_i`). 73 | nonlinearity_ingate : callable or None 74 | The nonlinearity that is applied to the input gate activation 75 | (:math:`\sigma_i`). If None is provided, no nonlinearity will be 76 | applied. 77 | W_in_to_forgetgate : Theano shared variable, numpy array or callable 78 | Initializer for input-to-forget gate weight matrix (:math:`W_{xf}`). 79 | W_hid_to_forgetgate : Theano shared variable, numpy array or callable 80 | Initializer for hidden-to-forget gate weight matrix (:math:`W_{hf}`). 81 | W_cell_to_forgetgate : Theano shared variable, numpy array or callable 82 | Initializer for cell-to-forget gate weight vector (:math:`w_{cf}`). 83 | b_forgetgate : Theano shared variable, numpy array or callable 84 | Initializer for forget gate bias vector (:math:`b_f`). 85 | nonlinearity_forgetgate : callable or None 86 | The nonlinearity that is applied to the forget gate activation 87 | (:math:`\sigma_f`). If None is provided, no nonlinearity will be 88 | applied. 89 | W_in_to_cell : Theano shared variable, numpy array or callable 90 | Initializer for input-to-cell weight matrix (:math:`W_{ic}`). 91 | W_hid_to_cell : Theano shared variable, numpy array or callable 92 | Initializer for hidden-to-cell weight matrix (:math:`W_{hc}`). 93 | b_cell : Theano shared variable, numpy array or callable 94 | Initializer for cell bias vector (:math:`b_c`). 95 | nonlinearity_cell : callable or None 96 | The nonlinearity that is applied to the cell activation 97 | (;math:`\sigma_c`). If None is provided, no nonlinearity will be 98 | applied. 99 | W_in_to_outgate : Theano shared variable, numpy array or callable 100 | Initializer for input-to-output gate weight matrix (:math:`W_{io}`). 101 | W_hid_to_outgate : Theano shared variable, numpy array or callable 102 | Initializer for hidden-to-output gate weight matrix (:math:`W_{ho}`). 103 | W_cell_to_outgate : Theano shared variable, numpy array or callable 104 | Initializer for cell-to-output gate weight vector (:math:`w_{co}`). 105 | b_outgate : Theano shared variable, numpy array or callable 106 | Initializer for hidden-to-input gate weight matrix (:math:`b_o`). 107 | nonlinearity_outgate : callable or None 108 | The nonlinearity that is applied to the output gate activation 109 | (:math:`\sigma_o`). If None is provided, no nonlinearity will be 110 | applied. 111 | nonlinearity_out : callable or None 112 | The nonlinearity that is applied to the output (:math:`\sigma_h`). If 113 | None is provided, no nonlinearity will be applied. 114 | cell_init : callable, np.ndarray, theano.shared or TensorVariable 115 | Passing in a TensorVariable allows the user to specify 116 | the value of `cell_init` (:math:`c_0`). In this mode `learn_init` is 117 | ignored for the cell state. 118 | hid_init : callable, np.ndarray, theano.shared or TensorVariable 119 | Passing in a TensorVariable allows the user to specify 120 | the value of `hid_init` (:math:`h_0`). In this mode `learn_init` is 121 | ignored for the hidden state. 122 | backwards : bool 123 | If True, process the sequence backwards and then reverse the 124 | output again such that the output from the layer is always 125 | from :math:`x_1` to :math:`x_n`. 126 | learn_init : bool 127 | If True, initial hidden values are learned. 
If `hid_init` or 128 | `cell_init` are TensorVariables then the TensorVariable is used and 129 | `learn_init` is ignored for that initial state. 130 | peepholes : bool 131 | If True, the LSTM uses peephole connections. 132 | When False, `W_cell_to_ingate`, `W_cell_to_forgetgate` and 133 | `W_cell_to_outgate` are ignored. 134 | gradient_steps : int 135 | Number of timesteps to include in the backpropagated gradient. 136 | If -1, backpropagate through the entire sequence. 137 | grad_clipping: False or float 138 | If a float is provided, the gradient messages are clipped during the 139 | backward pass. If False, the gradients will not be clipped. See [1]_ 140 | (p. 6) for further explanation. 141 | unroll_scan : bool 142 | If True the recursion is unrolled instead of using scan. For some 143 | graphs this gives a significant speed up but it might also consume 144 | more memory. When `unroll_scan` is true then the `gradient_steps` 145 | setting is ignored. 146 | precompute_input : bool 147 | If True, precompute input_to_hid before iterating through 148 | the sequence. This can result in a speedup at the expense of 149 | an increase in memory usage. 150 | 151 | References 152 | ---------- 153 | .. [1] Graves, Alex: "Generating sequences with recurrent neural networks." 154 | arXiv preprint arXiv:1308.0850 (2013). 155 | """ 156 | def __init__(self, incoming, 157 | num_units, 158 | aln_num_units, 159 | n_decodesteps, 160 | W_align=init.Normal(0.1), 161 | U_align=init.Normal(0.1), 162 | v_align=init.Normal(0.1), 163 | nonlinearity_align=nonlinearities.tanh, 164 | W_hid_to_ingate=init.Normal(0.1), 165 | W_cell_to_ingate=init.Normal(0.1), 166 | b_ingate=init.Constant(0.), 167 | nonlinearity_ingate=nonlinearities.sigmoid, 168 | #W_in_to_forgetgate=init.Normal(0.1), 169 | W_hid_to_forgetgate=init.Normal(0.1), 170 | W_cell_to_forgetgate=init.Normal(0.1), 171 | b_forgetgate=init.Constant(0.), 172 | nonlinearity_forgetgate=nonlinearities.sigmoid, 173 | #W_in_to_cell=init.Normal(0.1), 174 | W_hid_to_cell=init.Normal(0.1), 175 | b_cell=init.Constant(0.), 176 | nonlinearity_cell=nonlinearities.tanh, 177 | #W_in_to_outgate=init.Normal(0.1), 178 | W_hid_to_outgate=init.Normal(0.1), 179 | W_cell_to_outgate=init.Normal(0.1), 180 | b_outgate=init.Constant(0.), 181 | nonlinearity_outgate=nonlinearities.sigmoid, 182 | nonlinearity_out=nonlinearities.tanh, 183 | cell_init=init.Constant(0.), 184 | hid_init=init.Constant(0.), 185 | backwards=False, 186 | learn_init=False, 187 | peepholes=True, 188 | gradient_steps=-1, 189 | grad_clipping=False, 190 | unroll_scan=False, 191 | mask_input=None, 192 | #precompute_input=True, 193 | **kwargs): 194 | 195 | # Initialize parent layer 196 | # This layer inherits from a MergeLayer, because it can have two 197 | # inputs - the layer input, and the mask. We will just provide the 198 | # layer input as incomings, unless a mask input was provided. 
199 | incomings = [incoming] 200 | if mask_input is not None: 201 | incomings.append(mask_input) 202 | super(LSTMAttentionDecodeLayer, self).__init__(incomings, **kwargs) 203 | 204 | # For any of the nonlinearities, if None is supplied, use identity 205 | if nonlinearity_ingate is None: 206 | self.nonlinearity_ingate = nonlinearities.identity 207 | else: 208 | self.nonlinearity_ingate = nonlinearity_ingate 209 | 210 | if nonlinearity_forgetgate is None: 211 | self.nonlinearity_forgetgate = nonlinearities.identity 212 | else: 213 | self.nonlinearity_forgetgate = nonlinearity_forgetgate 214 | 215 | if nonlinearity_cell is None: 216 | self.nonlinearity_cell = nonlinearities.identity 217 | else: 218 | self.nonlinearity_cell = nonlinearity_cell 219 | 220 | if nonlinearity_outgate is None: 221 | self.nonlinearity_outgate = nonlinearities.identity 222 | else: 223 | self.nonlinearity_outgate = nonlinearity_outgate 224 | 225 | if nonlinearity_out is None: 226 | self.nonlinearity_out = nonlinearities.identity 227 | else: 228 | self.nonlinearity_out = nonlinearity_out 229 | 230 | self.learn_init = learn_init 231 | self.num_units = num_units 232 | self.backwards = backwards 233 | self.peepholes = peepholes 234 | self.gradient_steps = gradient_steps 235 | self.grad_clipping = grad_clipping 236 | self.unroll_scan = unroll_scan 237 | self.n_decodesteps = n_decodesteps 238 | self.aln_num_units = aln_num_units 239 | self.nonlinearity_align = nonlinearity_align 240 | 241 | # Retrieve the dimensionality of the incoming layer 242 | input_shape = self.input_shapes[0] 243 | if unroll_scan and input_shape[1] is None: 244 | raise ValueError("Input sequence length cannot be specified as " 245 | "None when unroll_scan is True") 246 | 247 | num_inputs = np.prod(self.input_shape[2:]) 248 | 249 | # Initialize parameters using the supplied args 250 | #self.W_in_to_ingate = self.add_param( 251 | # W_in_to_ingate, (num_inputs, num_units), name="W_in_to_ingate") 252 | 253 | self.W_hid_to_ingate = self.add_param( 254 | W_hid_to_ingate, (num_units, num_units), name="W_hid_to_ingate") 255 | 256 | self.b_ingate = self.add_param( 257 | b_ingate, (num_units,), name="b_ingate", regularizable=False) 258 | 259 | #self.W_in_to_forgetgate = self.add_param( 260 | # W_in_to_forgetgate, (num_inputs, num_units), 261 | # name="W_in_to_forgetgate") 262 | 263 | self.W_hid_to_forgetgate = self.add_param( 264 | W_hid_to_forgetgate, (num_units, num_units), 265 | name="W_hid_to_forgetgate") 266 | 267 | self.b_forgetgate = self.add_param( 268 | b_forgetgate, (num_units,), name="b_forgetgate", 269 | regularizable=False) 270 | 271 | #self.W_in_to_cell = self.add_param( 272 | # W_in_to_cell, (num_inputs, num_units), name="W_in_to_cell") 273 | 274 | self.W_hid_to_cell = self.add_param( 275 | W_hid_to_cell, (num_units, num_units), name="W_hid_to_cell") 276 | 277 | self.b_cell = self.add_param( 278 | b_cell, (num_units,), name="b_cell", regularizable=False) 279 | 280 | #self.W_in_to_outgate = self.add_param( 281 | # W_in_to_outgate, (num_inputs, num_units), name="W_in_to_outgate") 282 | 283 | self.W_hid_to_outgate = self.add_param( 284 | W_hid_to_outgate, (num_units, num_units), name="W_hid_to_outgate") 285 | 286 | self.b_outgate = self.add_param( 287 | b_outgate, (num_units,), name="b_outgate", regularizable=False) 288 | 289 | # Stack input weight matrices into a (num_inputs, 4*num_units) 290 | # matrix, which speeds up computation 291 | #self.W_in_stacked = T.concatenate( 292 | # [self.W_in_to_ingate, self.W_in_to_forgetgate, 293 | # 
self.W_in_to_cell, self.W_in_to_outgate], axis=1) 294 | 295 | # Same for hidden weight matrices 296 | self.W_hid_stacked = T.concatenate( 297 | [self.W_hid_to_ingate, self.W_hid_to_forgetgate, 298 | self.W_hid_to_cell, self.W_hid_to_outgate], axis=1) 299 | 300 | # Stack biases into a (4*num_units) vector 301 | self.b_stacked = T.concatenate( 302 | [self.b_ingate, self.b_forgetgate, 303 | self.b_cell, self.b_outgate], axis=0) 304 | 305 | # If peephole (cell to gate) connections were enabled, initialize 306 | # peephole connections. These are elementwise products with the cell 307 | # state, so they are represented as vectors. 308 | if self.peepholes: 309 | self.W_cell_to_ingate = self.add_param( 310 | W_cell_to_ingate, (num_units, ), name="W_cell_to_ingate") 311 | 312 | self.W_cell_to_forgetgate = self.add_param( 313 | W_cell_to_forgetgate, (num_units, ), 314 | name="W_cell_to_forgetgate") 315 | 316 | self.W_cell_to_outgate = self.add_param( 317 | W_cell_to_outgate, (num_units, ), name="W_cell_to_outgate") 318 | 319 | self.W_align = self.add_param(W_align, (num_units, self.aln_num_units), 320 | name="AlignSeqOutputLayer: (aln) W_a") 321 | self.U_align = self.add_param(U_align, (num_inputs, self.aln_num_units), 322 | name="AlignSeqOutputLayer: (aln) U_a") 323 | self.v_align = self.add_param(v_align, (self.aln_num_units, 1), 324 | name="AlignSeqOutputLayer: v_a") 325 | 326 | 327 | # Setup initial values for the cell and the hidden units 328 | if isinstance(cell_init, T.TensorVariable): 329 | if cell_init.ndim != 2: 330 | raise ValueError( 331 | "When cell_init is provided as a TensorVariable, it should" 332 | " have 2 dimensions and have shape (num_batch, num_units)") 333 | self.cell_init = cell_init 334 | else: 335 | self.cell_init = self.add_param( 336 | cell_init, (1, num_units), name="cell_init", 337 | trainable=learn_init, regularizable=False) 338 | 339 | if isinstance(hid_init, T.TensorVariable): 340 | if hid_init.ndim != 2: 341 | raise ValueError( 342 | "When hid_init is provided as a TensorVariable, it should " 343 | "have 2 dimensions and have shape (num_batch, num_units)") 344 | self.hid_init = hid_init 345 | else: 346 | self.hid_init = self.add_param( 347 | hid_init, (1, self.num_units), name="hid_init", 348 | trainable=learn_init, regularizable=False) 349 | 350 | def get_output_shape_for(self, input_shapes): 351 | input_shape = input_shapes[0] 352 | return input_shape[0], None, self.num_units 353 | 354 | def get_output_for(self, inputs, **kwargs): 355 | """ 356 | Compute this layer's output function given a symbolic input variable 357 | 358 | Parameters 359 | ---------- 360 | input : theano.TensorType 361 | Symbolic input variable. 362 | mask : theano.TensorType 363 | Theano variable denoting whether each time step in each 364 | sequence in the batch is part of the sequence or not. If ``None``, 365 | then it is assumed that all sequences are of the same length. If 366 | not all sequences are of the same length, then it must be 367 | supplied as a matrix of shape ``(n_batch, n_time_steps)`` where 368 | ``mask[i, j] = 1`` when ``j <= (length of sequence i)`` and 369 | ``mask[i, j] = 0`` when ``j > (length of sequence i)``. 370 | 371 | Returns 372 | ------- 373 | layer_output : theano.TensorType 374 | Symblic output variable. 
375 | """ 376 | input = inputs[0] 377 | # Retrieve the mask when it is supplied 378 | mask = inputs[1] if len(inputs) > 1 else None 379 | 380 | # Treat all dimensions after the second as flattened feature dimensions 381 | # Retrieve the layer input 382 | if input.ndim > 3: 383 | input = input.reshape((input.shape[0], input.shape[1], 384 | T.prod(input.shape[2:]))) 385 | num_batch = input.shape[0] 386 | encode_seqlen = input.shape[1] 387 | 388 | # At each call to scan, input_n will be (n_time_steps, 4*num_units). 389 | # We define a slicing function that extract the input to each LSTM gate 390 | def slice_w(x, n): 391 | return x[:, n*self.num_units:(n+1)*self.num_units] 392 | 393 | # Create single recurrent computation step function 394 | # input_n is the n'th vector of the input 395 | def step(cell_previous, hid_previous, a_prev, 396 | hUa, W_align, v_align, 397 | W_hid_stacked, W_cell_to_ingate, W_cell_to_forgetgate, 398 | W_cell_to_outgate, b_stacked): 399 | 400 | # Calculate gates pre-activations and slice 401 | gates = T.dot(hid_previous, W_hid_stacked) + b_stacked 402 | 403 | # Clip gradients 404 | if self.grad_clipping is not False: 405 | gates = theano.gradient.grad_clip( 406 | gates, -self.grad_clipping, self.grad_clipping) 407 | 408 | # Extract the pre-activation gate values 409 | ingate = slice_w(gates, 0) 410 | forgetgate = slice_w(gates, 1) 411 | cell_input = slice_w(gates, 2) 412 | outgate = slice_w(gates, 3) 413 | 414 | if self.peepholes: 415 | # Compute peephole connections 416 | ingate += cell_previous*W_cell_to_ingate 417 | forgetgate += cell_previous*W_cell_to_forgetgate 418 | 419 | # Apply nonlinearities 420 | ingate = self.nonlinearity_ingate(ingate) 421 | forgetgate = self.nonlinearity_forgetgate(forgetgate) 422 | cell_input = self.nonlinearity_cell(cell_input) 423 | outgate = self.nonlinearity_outgate(outgate) 424 | 425 | # Compute new cell value 426 | cell = forgetgate*cell_previous + ingate*cell_input 427 | 428 | if self.peepholes: 429 | outgate += cell*W_cell_to_outgate 430 | 431 | # W_align: (num_units, aln_num_units) 432 | # U_align: (num_feats, aln_num_units) 433 | # v_align: (aln_num_units, 1) 434 | # hUa: (BS, Seqlen, aln_num_units) 435 | # hid: (BS, num_units_dec) 436 | # input: (BS, Seqlen, num_inputs) 437 | 438 | # Compute new hidden unit activation 439 | hid = outgate*self.nonlinearity_out(cell) 440 | 441 | #compute (unormalized) attetion vector 442 | sWa = T.dot(hid, W_align) # (BS, aln_num_units) 443 | sWa = sWa.dimshuffle(0, 'x', 1) # (BS, 1, aln_num_units) 444 | tanh_sWahUa = self.nonlinearity_align(sWa + hUa) 445 | # (BS, seqlen, num_units_aln) 446 | 447 | # CALCULATE WEIGHT FOR EACH HIDDEN STATE VECTOR 448 | a = T.dot(tanh_sWahUa, v_align) # (BS, Seqlen, 1) 449 | a = T.reshape(a, (a.shape[0], a.shape[1])) 450 | # # (BS, Seqlen) 451 | # # ->(BS, seq_len) 452 | #a = a.squeeze() 453 | #a = a*a 454 | #a = a*mask - (1-mask)*10000 #this line does not work 455 | #a = T.reshape(a, (input.shape[0], input.shape[1])) 456 | 457 | #alpha = T.nnet.softmax(a) 458 | #alpha = T.reshape(alpha, (input.shape[0], input.shape[1])) 459 | 460 | # 461 | # # create alpha in dim (batch_size, seq_len, 1) 462 | 463 | # 464 | #weighted_hidden = input * alpha.dimshuffle(0, 1, 'x') 465 | #weighted_hidden = T.sum(weighted_hidden, axis=1) #sum seqlen out 466 | 467 | return [cell, hid, a] 468 | 469 | sequences = [] 470 | step_fun = step 471 | 472 | ones = T.ones((num_batch, 1)) 473 | if isinstance(self.cell_init, T.TensorVariable): 474 | cell_init = self.cell_init 475 | else: 476 | # 
Dot against a 1s vector to repeat to shape (num_batch, num_units) 477 | cell_init = T.dot(ones, self.cell_init) 478 | 479 | if isinstance(self.hid_init, T.TensorVariable): 480 | hid_init = self.hid_init 481 | else: 482 | # Dot against a 1s vector to repeat to shape (num_batch, num_units) 483 | hid_init = T.dot(ones, self.hid_init) 484 | 485 | #weighted_hidden_init = T.zeros((num_batch, input.shape[2])) 486 | alpha_init = T.zeros((num_batch, encode_seqlen)) 487 | 488 | # The hidden-to-hidden weight matrix is always used in step 489 | 490 | hUa = T.dot(input, self.U_align) # (num_batch, seq_len, num_units_aln) 491 | 492 | non_seqs = [hUa, self.W_align, self.v_align, 493 | self.W_hid_stacked] 494 | # The "peephole" weight matrices are only used when self.peepholes=True 495 | if self.peepholes: 496 | non_seqs += [self.W_cell_to_ingate, 497 | self.W_cell_to_forgetgate, 498 | self.W_cell_to_outgate] 499 | # theano.scan only allows for positional arguments, so when 500 | # self.peepholes is False, we need to supply fake placeholder arguments 501 | # for the three peephole matrices. 502 | else: 503 | non_seqs += [(), (), ()] 504 | # When we aren't precomputing the input outside of scan, we need to 505 | # provide the input weights and biases to the step function 506 | non_seqs += [self.b_stacked] 507 | 508 | if self.unroll_scan: 509 | # Explicitly unroll the recurrence instead of using scan 510 | cell_out, hid_out, a_out = unroll_scan( 511 | fn=step_fun, 512 | sequences=sequences, 513 | outputs_info=[cell_init, hid_init, alpha_init], 514 | go_backwards=self.backwards, 515 | non_sequences=non_seqs, 516 | n_steps=self.n_decodesteps) 517 | else: 518 | # Scan op iterates over first dimension of input and repeatedly 519 | # applies the step function 520 | cell_out, hid_out, a_out = theano.scan( 521 | fn=step_fun, 522 | sequences=sequences, 523 | outputs_info=[cell_init, hid_init, alpha_init], 524 | go_backwards=self.backwards, 525 | truncate_gradient=self.gradient_steps, 526 | non_sequences=non_seqs, 527 | n_steps=self.n_decodesteps, 528 | strict=True)[0] 529 | 530 | # dimshuffle back to (n_batch, n_time_steps, n_features)) 531 | 532 | #a_out - (n_decodesteps, bs, seqlen) 533 | #hid_out - (n_decode_steps, bs, num_units) 534 | 535 | 536 | # mask: (BS, encode_seqlen 537 | # a_out; (n_decodesteps, BS, encode_seqlen) 538 | cell_out = cell_out.dimshuffle(1, 0, 2) 539 | mask = mask.dimshuffle(0, 'x', 1) 540 | a_out = a_out.dimshuffle(1, 0, 2) # (BS, n_decodesteps, encode_seqlen) 541 | 542 | # set masked positions to large negative value 543 | a_out = a_out*mask - (1-mask)*10000 544 | 545 | # normalize over encode_seqlen (->large negative values = 0) 546 | a_out = T.reshape(a_out, (num_batch*self.n_decodesteps, encode_seqlen)) 547 | alpha = T.nnet.softmax(a_out) 548 | alpha = T.reshape(alpha, (num_batch, self.n_decodesteps, encode_seqlen)) 549 | 550 | # (BS, encode_seqlen, num_units) -> (BS, num_units, 1 encode_seqlen,) 551 | input = input.dimshuffle(0, 2, 'x', 1) 552 | # (BS, n_decodesteps, encode_seqlen) -> (BS, '1', n_decodesteps, encode_seqlen) 553 | alpha = alpha.dimshuffle(0, 'x', 1, 2) 554 | weighted_hidden_out = input*alpha 555 | 556 | weighted_hidden_out = T.sum(weighted_hidden_out, axis=3) 557 | # (BS, n_decodesteps, num_encode_units) 558 | 559 | # if scan is backward reverse the output 560 | if self.backwards: 561 | hid_out = hid_out[:, ::-1] 562 | cell_out = cell_out[:, ::-1] 563 | weighted_hidden_out = weighted_hidden_out[:, ::-1] 564 | alpha = alpha[:, ::-1] 565 | 566 | self.hid_out = hid_out 
567 | self.cell_out = cell_out 568 | self.weighted_hidden_out = weighted_hidden_out 569 | self.alpha = alpha 570 | 571 | return self.weighted_hidden_out 572 | 573 | 574 | class LSTMAttentionDecodeFeedbackLayer(MergeLayer): 575 | r"""A long short-term memory (LSTM) layer. 576 | 577 | Includes optional "peephole connections" and a forget gate. Based on the 578 | definition in [1]_, which is the current common definition. The output is 579 | computed by 580 | 581 | .. math :: 582 | 583 | i_t &= \sigma_i(W_{xi}x_t + W_{hi}h_{t-1} 584 | + w_{ci}\odot c_{t-1} + b_i)\\ 585 | f_t &= \sigma_f(W_{xf}x_t + W_{hf}h_{t-1} 586 | + w_{cf}\odot c_{t-1} + b_f)\\ 587 | c_t &= f_t \odot c_{t - 1} 588 | + i_t\sigma_c(W_{xc}x_t + W_{hc} h_{t-1} + b_c)\\ 589 | o_t &= \sigma_o(W_{xo}x_t + W_{ho}h_{t-1} + w_{co}\odot c_t + b_o)\\ 590 | h_t &= o_t \odot \sigma_h(c_t) 591 | 592 | Parameters 593 | ---------- 594 | incoming : a :class:`lasagne.layers.Layer` instance or a tuple 595 | The layer feeding into this layer, or the expected input shape. 596 | num_units : int 597 | Number of hidden/cell units in the layer. 598 | W_in_to_ingate : Theano shared variable, numpy array or callable 599 | Initializer for input-to-input gate weight matrix (:math:`W_{xi}`). 600 | W_hid_to_ingate : Theano shared variable, numpy array or callable 601 | Initializer for hidden-to-input gate weight matrix (:math:`W_{hi}`). 602 | W_cell_to_ingate : Theano shared variable, numpy array or callable 603 | Initializer for cell-to-input gate weight vector (:math:`w_{ci}`). 604 | b_ingate : Theano shared variable, numpy array or callable 605 | Initializer for input gate bias vector (:math:`b_i`). 606 | nonlinearity_ingate : callable or None 607 | The nonlinearity that is applied to the input gate activation 608 | (:math:`\sigma_i`). If None is provided, no nonlinearity will be 609 | applied. 610 | W_in_to_forgetgate : Theano shared variable, numpy array or callable 611 | Initializer for input-to-forget gate weight matrix (:math:`W_{xf}`). 612 | W_hid_to_forgetgate : Theano shared variable, numpy array or callable 613 | Initializer for hidden-to-forget gate weight matrix (:math:`W_{hf}`). 614 | W_cell_to_forgetgate : Theano shared variable, numpy array or callable 615 | Initializer for cell-to-forget gate weight vector (:math:`w_{cf}`). 616 | b_forgetgate : Theano shared variable, numpy array or callable 617 | Initializer for forget gate bias vector (:math:`b_f`). 618 | nonlinearity_forgetgate : callable or None 619 | The nonlinearity that is applied to the forget gate activation 620 | (:math:`\sigma_f`). If None is provided, no nonlinearity will be 621 | applied. 622 | W_in_to_cell : Theano shared variable, numpy array or callable 623 | Initializer for input-to-cell weight matrix (:math:`W_{ic}`). 624 | W_hid_to_cell : Theano shared variable, numpy array or callable 625 | Initializer for hidden-to-cell weight matrix (:math:`W_{hc}`). 626 | b_cell : Theano shared variable, numpy array or callable 627 | Initializer for cell bias vector (:math:`b_c`). 628 | nonlinearity_cell : callable or None 629 | The nonlinearity that is applied to the cell activation 630 | (;math:`\sigma_c`). If None is provided, no nonlinearity will be 631 | applied. 632 | W_in_to_outgate : Theano shared variable, numpy array or callable 633 | Initializer for input-to-output gate weight matrix (:math:`W_{io}`). 634 | W_hid_to_outgate : Theano shared variable, numpy array or callable 635 | Initializer for hidden-to-output gate weight matrix (:math:`W_{ho}`). 
636 | W_cell_to_outgate : Theano shared variable, numpy array or callable 637 | Initializer for cell-to-output gate weight vector (:math:`w_{co}`). 638 | b_outgate : Theano shared variable, numpy array or callable 639 | Initializer for hidden-to-input gate weight matrix (:math:`b_o`). 640 | nonlinearity_outgate : callable or None 641 | The nonlinearity that is applied to the output gate activation 642 | (:math:`\sigma_o`). If None is provided, no nonlinearity will be 643 | applied. 644 | nonlinearity_out : callable or None 645 | The nonlinearity that is applied to the output (:math:`\sigma_h`). If 646 | None is provided, no nonlinearity will be applied. 647 | cell_init : callable, np.ndarray, theano.shared or TensorVariable 648 | Passing in a TensorVariable allows the user to specify 649 | the value of `cell_init` (:math:`c_0`). In this mode `learn_init` is 650 | ignored for the cell state. 651 | hid_init : callable, np.ndarray, theano.shared or TensorVariable 652 | Passing in a TensorVariable allows the user to specify 653 | the value of `hid_init` (:math:`h_0`). In this mode `learn_init` is 654 | ignored for the hidden state. 655 | backwards : bool 656 | If True, process the sequence backwards and then reverse the 657 | output again such that the output from the layer is always 658 | from :math:`x_1` to :math:`x_n`. 659 | learn_init : bool 660 | If True, initial hidden values are learned. If `hid_init` or 661 | `cell_init` are TensorVariables then the TensorVariable is used and 662 | `learn_init` is ignored for that initial state. 663 | peepholes : bool 664 | If True, the LSTM uses peephole connections. 665 | When False, `W_cell_to_ingate`, `W_cell_to_forgetgate` and 666 | `W_cell_to_outgate` are ignored. 667 | gradient_steps : int 668 | Number of timesteps to include in the backpropagated gradient. 669 | If -1, backpropagate through the entire sequence. 670 | grad_clipping: False or float 671 | If a float is provided, the gradient messages are clipped during the 672 | backward pass. If False, the gradients will not be clipped. See [1]_ 673 | (p. 6) for further explanation. 674 | unroll_scan : bool 675 | If True the recursion is unrolled instead of using scan. For some 676 | graphs this gives a significant speed up but it might also consume 677 | more memory. When `unroll_scan` is true then the `gradient_steps` 678 | setting is ignored. 679 | precompute_input : bool 680 | If True, precompute input_to_hid before iterating through 681 | the sequence. This can result in a speedup at the expense of 682 | an increase in memory usage. 683 | 684 | References 685 | ---------- 686 | .. [1] Graves, Alex: "Generating sequences with recurrent neural networks." 687 | arXiv preprint arXiv:1308.0850 (2013). 
688 | """ 689 | def __init__(self, incoming, 690 | num_units, 691 | aln_num_units, 692 | n_decodesteps, 693 | W_align=init.Normal(0.1), 694 | U_align=init.Normal(0.1), 695 | v_align=init.Normal(0.1), 696 | U_conv_align=init.Normal(0.1), 697 | nonlinearity_align=nonlinearities.tanh, 698 | W_hid_to_ingate=init.Normal(0.1), 699 | W_cell_to_ingate=init.Normal(0.1), 700 | b_ingate=init.Constant(0.), 701 | nonlinearity_ingate=nonlinearities.sigmoid, 702 | #W_in_to_forgetgate=init.Normal(0.1), 703 | W_hid_to_forgetgate=init.Normal(0.1), 704 | W_cell_to_forgetgate=init.Normal(0.1), 705 | b_forgetgate=init.Constant(0.), 706 | nonlinearity_forgetgate=nonlinearities.sigmoid, 707 | #W_in_to_cell=init.Normal(0.1), 708 | W_hid_to_cell=init.Normal(0.1), 709 | b_cell=init.Constant(0.), 710 | nonlinearity_cell=nonlinearities.tanh, 711 | #W_in_to_outgate=init.Normal(0.1), 712 | W_hid_to_outgate=init.Normal(0.1), 713 | W_cell_to_outgate=init.Normal(0.1), 714 | b_outgate=init.Constant(0.), 715 | nonlinearity_outgate=nonlinearities.sigmoid, 716 | nonlinearity_out=nonlinearities.tanh, 717 | cell_init=init.Constant(0.), 718 | hid_init=init.Constant(0.), 719 | backwards=False, 720 | learn_init=False, 721 | peepholes=True, 722 | gradient_steps=-1, 723 | grad_clipping=False, 724 | unroll_scan=False, 725 | attention_softmax_function=T.nnet.softmax, 726 | #precompute_input=True, 727 | decode_pre_steps=0, 728 | return_decodehid=False, 729 | mask_input=None, 730 | **kwargs): 731 | 732 | # Initialize parent layer 733 | incomings = [incoming] 734 | if mask_input is not None: 735 | incomings.append(mask_input) 736 | super(LSTMAttentionDecodeFeedbackLayer, self).__init__( 737 | incomings, **kwargs) 738 | 739 | # For any of the nonlinearities, if None is supplied, use identity 740 | if nonlinearity_ingate is None: 741 | self.nonlinearity_ingate = nonlinearities.identity 742 | else: 743 | self.nonlinearity_ingate = nonlinearity_ingate 744 | 745 | if nonlinearity_forgetgate is None: 746 | self.nonlinearity_forgetgate = nonlinearities.identity 747 | else: 748 | self.nonlinearity_forgetgate = nonlinearity_forgetgate 749 | 750 | if nonlinearity_cell is None: 751 | self.nonlinearity_cell = nonlinearities.identity 752 | else: 753 | self.nonlinearity_cell = nonlinearity_cell 754 | 755 | if nonlinearity_outgate is None: 756 | self.nonlinearity_outgate = nonlinearities.identity 757 | else: 758 | self.nonlinearity_outgate = nonlinearity_outgate 759 | 760 | if nonlinearity_out is None: 761 | self.nonlinearity_out = nonlinearities.identity 762 | else: 763 | self.nonlinearity_out = nonlinearity_out 764 | 765 | self.attention_softmax_function = attention_softmax_function 766 | 767 | self.learn_init = learn_init 768 | self.num_units = num_units 769 | self.backwards = backwards 770 | self.peepholes = peepholes 771 | self.gradient_steps = gradient_steps 772 | self.grad_clipping = grad_clipping 773 | self.unroll_scan = unroll_scan 774 | self.n_decodesteps = n_decodesteps 775 | self.aln_num_units = aln_num_units 776 | self.nonlinearity_align = nonlinearity_align 777 | self.decode_pre_steps = decode_pre_steps 778 | self.return_decodehid = return_decodehid 779 | 780 | input_shape = self.input_shapes[0] 781 | if unroll_scan and input_shape[1] is None: 782 | raise ValueError("Input sequence length cannot be specified as " 783 | "None when unroll_scan is True") 784 | 785 | num_inputs = np.prod(input_shape[2:]) 786 | self.num_inputs = num_inputs 787 | # Initialize parameters using the supplied args 788 | #self.W_in_to_ingate = self.add_param( 789 | 
# W_in_to_ingate, (num_inputs, num_units), name="W_in_to_ingate") 790 | 791 | self.W_hid_to_ingate = self.add_param( 792 | W_hid_to_ingate, (num_units, num_units), name="W_hid_to_ingate") 793 | 794 | self.b_ingate = self.add_param( 795 | b_ingate, (num_units,), name="b_ingate", regularizable=False) 796 | 797 | #self.W_in_to_forgetgate = self.add_param( 798 | # W_in_to_forgetgate, (num_inputs, num_units), 799 | # name="W_in_to_forgetgate") 800 | 801 | self.W_hid_to_forgetgate = self.add_param( 802 | W_hid_to_forgetgate, (num_units, num_units), 803 | name="W_hid_to_forgetgate") 804 | 805 | self.b_forgetgate = self.add_param( 806 | b_forgetgate, (num_units,), name="b_forgetgate", 807 | regularizable=False) 808 | 809 | #self.W_in_to_cell = self.add_param( 810 | # W_in_to_cell, (num_inputs, num_units), name="W_in_to_cell") 811 | 812 | self.W_hid_to_cell = self.add_param( 813 | W_hid_to_cell, (num_units, num_units), name="W_hid_to_cell") 814 | 815 | self.b_cell = self.add_param( 816 | b_cell, (num_units,), name="b_cell", regularizable=False) 817 | 818 | #self.W_in_to_outgate = self.add_param( 819 | # W_in_to_outgate, (num_inputs, num_units), name="W_in_to_outgate") 820 | 821 | self.W_hid_to_outgate = self.add_param( 822 | W_hid_to_outgate, (num_units, num_units), name="W_hid_to_outgate") 823 | 824 | self.b_outgate = self.add_param( 825 | b_outgate, (num_units,), name="b_outgate", regularizable=False) 826 | 827 | 828 | self.W_weightedhid_to_ingate = self.add_param( 829 | W_hid_to_ingate, (num_inputs, num_units), name="W_weightedhid_to_ingate") 830 | 831 | self.W_weightedhid_to_forgetgate = self.add_param( 832 | W_hid_to_forgetgate, (num_inputs, num_units), 833 | name="W_weightedhid_to_forgetgate") 834 | 835 | self.W_weightedhid_to_cell = self.add_param( 836 | W_hid_to_cell, (num_inputs, num_units), name="W_weightedhid_to_cell") 837 | 838 | self.W_weightedhid_to_outgate = self.add_param( 839 | W_hid_to_outgate, (num_inputs, num_units), name="W_weightedhid_to_outgate") 840 | 841 | 842 | 843 | 844 | # Stack input weight matrices into a (num_inputs, 4*num_units) 845 | # matrix, which speeds up computation 846 | #self.W_in_stacked = T.concatenate( 847 | # [self.W_in_to_ingate, self.W_in_to_forgetgate, 848 | # self.W_in_to_cell, self.W_in_to_outgate], axis=1) 849 | 850 | # Same for hidden weight matrices 851 | self.W_hid_stacked = T.concatenate( 852 | [self.W_hid_to_ingate, self.W_hid_to_forgetgate, 853 | self.W_hid_to_cell, self.W_hid_to_outgate], axis=1) 854 | 855 | self.W_weightedhid_stacked = T.concatenate( 856 | [self.W_weightedhid_to_ingate, self.W_weightedhid_to_forgetgate, 857 | self.W_weightedhid_to_cell, self.W_weightedhid_to_outgate], axis=1) 858 | 859 | # Stack biases into a (4*num_units) vector 860 | self.b_stacked = T.concatenate( 861 | [self.b_ingate, self.b_forgetgate, 862 | self.b_cell, self.b_outgate], axis=0) 863 | 864 | # If peephole (cell to gate) connections were enabled, initialize 865 | # peephole connections. These are elementwise products with the cell 866 | # state, so they are represented as vectors. 
867 | if self.peepholes: 868 | self.W_cell_to_ingate = self.add_param( 869 | W_cell_to_ingate, (num_units, ), name="W_cell_to_ingate") 870 | 871 | self.W_cell_to_forgetgate = self.add_param( 872 | W_cell_to_forgetgate, (num_units, ), 873 | name="W_cell_to_forgetgate") 874 | 875 | self.W_cell_to_outgate = self.add_param( 876 | W_cell_to_outgate, (num_units, ), name="W_cell_to_outgate") 877 | 878 | self.W_align = self.add_param(W_align, (num_units, self.aln_num_units), 879 | name="AlignSeqOutputLayer: (aln) W_a") 880 | self.U_align = self.add_param(U_align, (num_inputs, self.aln_num_units), 881 | name="AlignSeqOutputLayer: (aln) U_a") 882 | self.v_align = self.add_param(v_align, (self.aln_num_units, 1), 883 | name="AlignSeqOutputLayer: v_a") 884 | 885 | 886 | # Setup initial values for the cell and the hidden units 887 | if isinstance(cell_init, T.TensorVariable): 888 | if cell_init.ndim != 2: 889 | raise ValueError( 890 | "When cell_init is provided as a TensorVariable, it should" 891 | " have 2 dimensions and have shape (num_batch, num_units)") 892 | self.cell_init = cell_init 893 | else: 894 | self.cell_init = self.add_param( 895 | cell_init, (1, num_units), name="cell_init", 896 | trainable=learn_init, regularizable=False) 897 | 898 | if isinstance(hid_init, T.TensorVariable): 899 | if hid_init.ndim != 2: 900 | raise ValueError( 901 | "When hid_init is provided as a TensorVariable, it should " 902 | "have 2 dimensions and have shape (num_batch, num_units)") 903 | self.hid_init = hid_init 904 | else: 905 | self.hid_init = self.add_param( 906 | hid_init, (1, self.num_units), name="hid_init", 907 | trainable=learn_init, regularizable=False) 908 | 909 | def get_output_shape_for(self, input_shapes): 910 | input_shape = input_shapes[0] 911 | return input_shape[0], None, self.num_units 912 | 913 | def get_params(self, **tags): 914 | # Get all parameters from this layer, the master layer 915 | params = super(LSTMAttentionDecodeFeedbackLayer, self).get_params(**tags) 916 | # Combine with all parameters from the child layers 917 | return params 918 | 919 | def get_output_for(self, inputs, **kwargs): 920 | """ 921 | Compute this layer's output function given a symbolic input variable 922 | 923 | Parameters 924 | ---------- 925 | input : theano.TensorType 926 | Symbolic input variable. 927 | mask : theano.TensorType 928 | Theano variable denoting whether each time step in each 929 | sequence in the batch is part of the sequence or not. If ``None``, 930 | then it is assumed that all sequences are of the same length. If 931 | not all sequences are of the same length, then it must be 932 | supplied as a matrix of shape ``(n_batch, n_time_steps)`` where 933 | ``mask[i, j] = 1`` when ``j <= (length of sequence i)`` and 934 | ``mask[i, j] = 0`` when ``j > (length of sequence i)``. 935 | 936 | Returns 937 | ------- 938 | layer_output : theano.TensorType 939 | Symblic output variable. 940 | """ 941 | input = inputs[0] 942 | # Retrieve the mask when it is supplied 943 | mask = inputs[1] if len(inputs) > 1 else None 944 | 945 | # Treat all dimensions after the second as flattened feature dimensions 946 | if input.ndim > 3: 947 | input = input.reshape((input.shape[0], input.shape[1], 948 | T.prod(input.shape[2:]))) 949 | num_batch = input.shape[0] 950 | encode_seqlen = input.shape[1] 951 | 952 | if mask is None: 953 | mask = T.ones((num_batch, encode_seqlen),dtype='float32') 954 | # At each call to scan, input_n will be (n_time_steps, 4*num_units). 
955 | # We define a slicing function that extract the input to each LSTM gate 956 | def slice_w(x, n): 957 | return x[:, n*self.num_units:(n+1)*self.num_units] 958 | 959 | # Create single recurrent computation step function 960 | # input_n is the n'th vector of the input 961 | def step(cell_previous, hid_previous, alpha_prev, weighted_hidden_prev, 962 | input, mask, hUa, W_align, v_align, 963 | W_hid_stacked, W_weightedhid_stacked, W_cell_to_ingate, 964 | W_cell_to_forgetgate, W_cell_to_outgate, 965 | b_stacked, *args): 966 | 967 | #compute (unormalized) attetion vector 968 | sWa = T.dot(hid_previous, W_align) # (BS, aln_num_units) 969 | sWa = sWa.dimshuffle(0, 'x', 1) # (BS, 1, aln_num_units) 970 | align_act = sWa + hUa 971 | tanh_sWahUa = self.nonlinearity_align(align_act) 972 | # (BS, seqlen, num_units_aln) 973 | 974 | # CALCULATE WEIGHT FOR EACH HIDDEN STATE VECTOR 975 | a = T.dot(tanh_sWahUa, v_align) # (BS, Seqlen, 1) 976 | a = T.reshape(a, (a.shape[0], a.shape[1])) 977 | # # (BS, Seqlen) 978 | # # ->(BS, seq_len) 979 | 980 | a = a*mask - (1-mask)*10000 981 | 982 | alpha = self.attention_softmax_function(a) 983 | #alpha = T.reshape(alpha, (input.shape[0], input.shape[1])) 984 | 985 | # input: (BS, Seqlen, num_units) 986 | weighted_hidden = input * alpha.dimshuffle(0, 1, 'x') 987 | weighted_hidden = T.sum(weighted_hidden, axis=1) #sum seqlen out 988 | 989 | 990 | # Calculate gates pre-activations and slice 991 | 992 | # (BS, dec_hid) x (dec_hid, dec_hid) 993 | gates = T.dot(hid_previous, W_hid_stacked) + b_stacked 994 | # (BS, enc_hid) x (enc_hid, dec_hid) 995 | gates += T.dot(weighted_hidden, W_weightedhid_stacked) 996 | 997 | # Clip gradients 998 | if self.grad_clipping is not False: 999 | gates = theano.gradient.grad_clip( 1000 | gates, -self.grad_clipping, self.grad_clipping) 1001 | 1002 | # Extract the pre-activation gate values 1003 | ingate = slice_w(gates, 0) 1004 | forgetgate = slice_w(gates, 1) 1005 | cell_input = slice_w(gates, 2) 1006 | outgate = slice_w(gates, 3) 1007 | 1008 | if self.peepholes: 1009 | # Compute peephole connections 1010 | ingate += cell_previous*W_cell_to_ingate 1011 | forgetgate += cell_previous*W_cell_to_forgetgate 1012 | 1013 | # Apply nonlinearities 1014 | ingate = self.nonlinearity_ingate(ingate) 1015 | forgetgate = self.nonlinearity_forgetgate(forgetgate) 1016 | cell_input = self.nonlinearity_cell(cell_input) 1017 | outgate = self.nonlinearity_outgate(outgate) 1018 | 1019 | # Compute new cell value 1020 | cell = forgetgate*cell_previous + ingate*cell_input 1021 | 1022 | if self.peepholes: 1023 | outgate += cell*W_cell_to_outgate 1024 | 1025 | # W_align: (num_units, aln_num_units) 1026 | # U_align: (num_feats, aln_num_units) 1027 | # v_align: (aln_num_units, 1) 1028 | # hUa: (BS, Seqlen, aln_num_units) 1029 | # hid: (BS, num_units_dec) 1030 | # input: (BS, Seqlen, num_inputs) 1031 | 1032 | # Compute new hidden unit activation 1033 | hid = outgate*self.nonlinearity_out(cell) 1034 | 1035 | return [cell, hid, alpha, weighted_hidden] 1036 | 1037 | sequences = [] 1038 | step_fun = step 1039 | 1040 | ones = T.ones((num_batch, 1)) 1041 | if isinstance(self.cell_init, T.TensorVariable): 1042 | cell_init = self.cell_init 1043 | else: 1044 | # Dot against a 1s vector to repeat to shape (num_batch, num_units) 1045 | cell_init = T.dot(ones, self.cell_init) 1046 | 1047 | if isinstance(self.hid_init, T.TensorVariable): 1048 | hid_init = self.hid_init 1049 | else: 1050 | # Dot against a 1s vector to repeat to shape (num_batch, num_units) 1051 | hid_init = 
T.dot(ones, self.hid_init)
1052 | 
1053 |         #weighted_hidden_init = T.zeros((num_batch, input.shape[2]))
1054 |         alpha_init = T.zeros((num_batch, encode_seqlen))
1055 | 
1056 |         weighted_hidden_init = T.zeros((num_batch, self.num_inputs))
1057 | 
1058 |         # The hidden-to-hidden weight matrix is always used in step
1059 | 
1060 |         hUa = T.dot(input, self.U_align)  # (num_batch, seq_len, num_units_aln)
1061 | 
1062 |         non_seqs = [input, mask, hUa, self.W_align, self.v_align,
1063 |                     self.W_hid_stacked, self.W_weightedhid_stacked]
1064 |         # The "peephole" weight matrices are only used when self.peepholes=True
1065 |         if self.peepholes:
1066 |             non_seqs += [self.W_cell_to_ingate,
1067 |                          self.W_cell_to_forgetgate,
1068 |                          self.W_cell_to_outgate]
1069 |         # theano.scan only allows for positional arguments, so when
1070 |         # self.peepholes is False, we need to supply fake placeholder arguments
1071 |         # for the three peephole matrices.
1072 |         else:
1073 |             non_seqs += [(), (), ()]
1074 |         # When we aren't precomputing the input outside of scan, we need to
1075 |         # provide the input weights and biases to the step function
1076 | 
1077 |         non_seqs += [self.b_stacked]
1078 | 
1079 |         if self.unroll_scan:
1080 |             # Explicitly unroll the recurrence instead of using scan
1081 |             cell_out, hid_out, alpha_out, weighted_hidden_out = unroll_scan(
1082 |                 fn=step_fun,
1083 |                 sequences=sequences,
1084 |                 outputs_info=[cell_init, hid_init, alpha_init, weighted_hidden_init],
1085 |                 go_backwards=self.backwards,
1086 |                 non_sequences=non_seqs,
1087 |                 n_steps=self.n_decodesteps + self.decode_pre_steps)
1088 |         else:
1089 |             # Scan op iterates over first dimension of input and repeatedly
1090 |             # applies the step function
1091 |             cell_out, hid_out, alpha_out, weighted_hidden_out = theano.scan(
1092 |                 fn=step_fun,
1093 |                 sequences=sequences,
1094 |                 outputs_info=[cell_init, hid_init, alpha_init, weighted_hidden_init],
1095 |                 go_backwards=self.backwards,
1096 |                 truncate_gradient=self.gradient_steps,
1097 |                 non_sequences=non_seqs,
1098 |                 n_steps=self.n_decodesteps + self.decode_pre_steps,
1099 |                 strict=True)[0]
1100 | 
1101 |         # dimshuffle back to (n_batch, n_time_steps, n_features)
1102 | 
1103 |         #a_out - (n_decodesteps, bs, seqlen)
1104 |         #hid_out - (n_decode_steps, bs, num_units)
1105 | 
1106 | 
1107 |         # mask: (BS, encode_seqlen)
1108 |         # a_out: (n_decodesteps, BS, encode_seqlen)
1109 |         cell_out = cell_out.dimshuffle(1, 0, 2)
1110 |         hid_out = hid_out.dimshuffle(1, 0, 2)  # (BS, n_decodesteps, num_units)
1111 |         mask = mask.dimshuffle(0, 'x', 1)
1112 |         alpha_out = alpha_out.dimshuffle(1, 0, 2)  # (BS, n_decodesteps, encode_seqlen)
1113 | 
1114 |         weighted_hidden_out = weighted_hidden_out.dimshuffle(1, 0, 2)
1115 | 
1116 |         # if scan is backward reverse the output
1117 |         if self.backwards:
1118 |             hid_out = hid_out[:, ::-1]
1119 |             cell_out = cell_out[:, ::-1]
1120 |             weighted_hidden_out = weighted_hidden_out[:, ::-1]
1121 |             alpha_out = alpha_out[:, ::-1]
1122 | 
1123 |         if self.decode_pre_steps > 0:
1124 |             hid_out = hid_out[:, self.decode_pre_steps:]
1125 |             cell_out = cell_out[:, self.decode_pre_steps:]
1126 |             weighted_hidden_out = weighted_hidden_out[:, self.decode_pre_steps:]
1127 |             alpha_out = alpha_out[:, self.decode_pre_steps:]
1128 | 
1129 |         self.hid_out = hid_out
1130 |         self.cell_out = cell_out
1131 |         self.weighted_hidden_out = weighted_hidden_out
1132 |         self.alpha = alpha_out
1133 | 
1134 |         if self.return_decodehid:
1135 |             return hid_out
1136 |         else:
1137 |             return weighted_hidden_out
1138 | 
1139 | 
1140 | 
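For reference, the alignment model implemented by both layers above is additive ("Bahdanau-style") attention: a score $a_i = v^T \tanh(W h_{t-1} + U h^{enc}_i)$ per encoder position, masked, softmax-normalized, and used to form a weighted sum of encoder states. Below is a minimal NumPy sketch of a single decoder step's attention computation, following the shape comments in `step()`; all sizes and variable names here are illustrative, not part of the original file.

```python
import numpy as np

def softmax(a):
    # row-wise softmax over the last axis
    e = np.exp(a - a.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

# Illustrative sizes
BS, seqlen, num_inputs = 2, 5, 4   # encoder outputs: (BS, seqlen, num_inputs)
num_units, aln_num_units = 3, 6    # decoder state size, alignment size

rng = np.random.RandomState(0)
H_enc = rng.randn(BS, seqlen, num_inputs)  # encoder hidden states
hid = rng.randn(BS, num_units)             # decoder state h_{t-1}
W_align = rng.randn(num_units, aln_num_units)
U_align = rng.randn(num_inputs, aln_num_units)
v_align = rng.randn(aln_num_units)
mask = np.ones((BS, seqlen)); mask[1, 3:] = 0  # second sequence is shorter

# a_i = v^T tanh(W h_{t-1} + U h_enc_i), the same maths as in step()
hUa = np.dot(H_enc, U_align)             # (BS, seqlen, aln_num_units)
sWa = np.dot(hid, W_align)[:, None, :]   # (BS, 1, aln_num_units)
a = np.dot(np.tanh(sWa + hUa), v_align)  # (BS, seqlen)

# push masked positions to a large negative value, then normalize,
# exactly as in get_output_for() above
a = a*mask - (1 - mask)*10000
alpha = softmax(a)                       # attention weights, rows sum to 1

# context vector: attention-weighted sum of encoder states
context = (H_enc * alpha[:, :, None]).sum(axis=1)  # (BS, num_inputs)
print(context.shape)
```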
-------------------------------------------------------------------------------- /day3-RNN/enc-dec.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day3-RNN/enc-dec.png -------------------------------------------------------------------------------- /day4-VAE/Tutorial.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day4-VAE/Tutorial.pdf -------------------------------------------------------------------------------- /day4-VAE/bayes by backprop.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Variational Weight Uncertainty\n", 8 | "\n", 9 | "In this exercise we'll implement Bayes by Backprop.\n", 10 | "\n", 11 | "As background reading it is recommended that you read through the article \"Practical Variational Inference for Neural Networks\", Graves, NIPS 2011:\n", 12 | "http://papers.nips.cc/paper/4329-practical-variational-inference-for-neural-networks.pdf\n", 13 | "\n", 14 | "For the implementation of the model you must read the article \"Weight Uncertainty in Neural networks\", Blundell et al, ICML 2015:\n", 15 | "http://jmlr.org/proceedings/papers/v37/blundell15.pdf\n", 16 | "\n", 17 | "First we are running through some math to understand the variational inference for neural networks." 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "## Exercise 1\n", 25 | "Use Jensen's inequality to show that, if $q(\\theta)\\ne 0$,\n", 26 | "\n", 27 | "$\\log p(y|x) \\geq E_{q(\\theta)}\\left[ \\log p(y|x,\\theta)p(\\theta) - \\log q(\\theta) \\right]$" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "###Solution\n", 35 | "The derivation looks as follows:\n", 36 | "\n", 37 | "$\\log p(y|x) = \\log \\int_{\\theta} p(\\theta, y|x)d\\theta$\n", 38 | "\t\t\t \n", 39 | "$= \\log \\int_{\\theta} p(y|x,\\theta)p(\\theta)d\\theta$\n", 40 | "\t\t\t\t\n", 41 | "$= \\log \\int_{\\theta} p(y|x,\\theta) p(\\theta)\\frac{q(\\theta)}{q(\\theta)}d\\theta$\n", 42 | " \n", 43 | "$= $ TODO: Continue... Think about expectations." 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "Now we use Jensen's inequality $f(E[X]) \\geq E[f(X)]$ when f is concave ($\\log(x)$ is concave since the second derivative is strictly negative), to push the log into the expectations." 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "$\\log p(y|x) = $ TODO: Finish the proof." 
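As a quick aside, the two ideas underlying Exercises 1-4 can both be checked numerically. The snippet below is not part of the original notebook (all numbers are illustrative); it verifies Jensen's inequality for the concave $\log$ and the unbiasedness of reparameterized gradients.

```python
import numpy as np

rng = np.random.RandomState(0)
eps = rng.randn(100000)

# Jensen's inequality for the concave log: log E[X] >= E[log X].
# Take X = exp(eps), a log-normal variable with eps ~ N(0, 1).
X = np.exp(eps)
print(np.log(X.mean()))  # ~0.5, since log E[exp(eps)] = 1/2
print(np.log(X).mean())  # ~0.0, since E[log X] = E[eps] = 0; indeed smaller

# Reparameterization: theta = mu + sigma*eps with eps ~ N(0, I) gives
# d/dmu E[f(theta)] = E[f'(theta)]. For f(theta) = theta^2 the exact
# derivative is 2*mu; the Monte Carlo estimate should match it.
mu, sigma = 1.5, 0.7
theta = mu + sigma*eps
print((2*theta).mean())  # ~3.0 = 2*mu
```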
58 | ] 59 | }, 60 | { 61 | "cell_type": "markdown", 62 | "metadata": {}, 63 | "source": [ 64 | "## Exercise 2\n", 65 | "Now show that if $q(\\theta)\\ne 0,$\n", 66 | "\n", 67 | "where $\\mu$ is the mean vector and $D$ is a diagonal covariance matrix, then\n", 68 | "\n", 69 | "$E_{q(\\theta)}\\left[ \\log p(y|x,\\theta)p(\\theta) - \\log q(\\theta) \\right] = E_{\\mathcal{N}(\\epsilon|0,I)}\\left[ \\log p(y|x,\\theta)p(\\theta) - \\log q(\\theta) \\right]$\n", 70 | "\n" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "###Solution\n", 78 | "To solve this problem you need to apply the reparameterization trick introduced in: http://arxiv.org/abs/1312.6114 and http://jmlr.org/proceedings/papers/v37/blundell15.pdf. \n", 79 | "\n", 80 | "The basic idea is to change the expectation from being over $q(\\theta)$ to be over $p(\\epsilon) \\sim \\mathcal{N}(0,I)$. We assume that the following is true:\n", 81 | "$\\theta = t(\\theta,\\epsilon)$.\n", 82 | "\n", 83 | "Where $t(\\theta,\\epsilon)$ is a deterministic function and $\\epsilon$ is distributed according to some distribution fulfilling: $p(\\epsilon) d \\epsilon = q(\\theta)d\\theta$.\n", 84 | "\n", 85 | "From defining $q(\\theta)=t(\\theta,\\epsilon)=\\mathcal{N}(\\mu,\\sigma)$ it follows that,\n", 86 | "\n", 87 | "$\\theta \\sim q(\\theta) = t(\\theta,\\epsilon) = \\mu + \\sigma \\epsilon$\n", 88 | "\n", 89 | "$\\epsilon \\sim \\mathcal{N}(0,I)$\n", 90 | "\n", 91 | "Armed with these facts we perform the calculations:\n", 92 | "\n", 93 | "TODO: Perform the calculations." 94 | ] 95 | }, 96 | { 97 | "cell_type": "markdown", 98 | "metadata": {}, 99 | "source": [ 100 | "## Exercise 3\n", 101 | "Show that if\n", 102 | "\n", 103 | "$p(\\theta) = \\mathcal{N}(\\theta|0,\\sigma^2I)$\n", 104 | "\n", 105 | "$q(\\theta) = \\mathcal{N}(\\theta|\\mu,D)$\n", 106 | "\n", 107 | "$D = diag(\\left[e^{2d_1},e^{2d_2},...,e^{2d_p}\\right])$\n", 108 | "\n", 109 | "$\\mathcal{F}(\\mu,D)=\\mathbb{E}_{q(\\theta)} \\left[ \\log p(x|\\theta) p(\\theta) - \\log q(\\theta) \\right]$\n", 110 | "\n", 111 | "Then if $\\theta = \\mu + D^{\\frac{1}{2}}\\epsilon$\n", 112 | "\n", 113 | "$\\frac{\\partial \\mathcal{F}(\\mu,D)}{\\partial \\mu_i} = E_{\\mathcal{N}(\\epsilon|0,I)}\\left[ \\frac{\\partial logp(y|x,\\theta)}{\\partial \\theta} + \\frac{\\partial logp(\\theta)}{\\partial \\theta} - \\frac{\\partial logq(\\theta)}{\\partial \\theta} \\right]$\n", 114 | "\n", 115 | "$\\frac{\\partial \\mathcal{F}(\\mu,D)}{\\partial d_i} = E_{\\mathcal{N}(\\epsilon|0,I)}\\left[ (\\frac{\\partial logp(y|x,\\theta)}{\\partial \\theta} + \\frac{\\partial logp(\\theta)}{\\partial \\theta} - \\frac{\\partial logq(\\theta)}{\\partial \\theta})e^{d_i} \\epsilon_i \\right]$" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "###Solution\n", 123 | "In exercise 2 we showed that we could change from expectations over $q(\\theta)$ to $p(\\theta)$:\n", 124 | "\n", 125 | "$\\frac{\\partial}{\\partial\\theta}\\mathbb{E}_{q(\\theta)} \\left[ \\log p(y|x,\\theta) p(\\theta) - \\log q(\\theta) \\right]$\n", 126 | "\n", 127 | "$= \\frac{\\partial}{\\partial\\theta}\\mathbb{E}_{p(\\epsilon)} \\left[ \\log p(y|x,\\theta) p(\\theta) - \\log q(\\theta)\\right]$ \n", 128 | "\n", 129 | "$= \\mathbb{E}_{p(\\epsilon)} \\left[ \\frac{\\partial}{\\partial\\theta}\\log p(y|x,\\theta)+ \\frac{\\partial}{\\partial\\theta}p(\\theta) - \\frac{\\partial}{\\partial\\theta}\\log q(\\theta) \\right]$\n", 130 | "\n", 131 | "Now 
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "## Exercise 3\n",
101 | "Show that if\n",
102 | "\n",
103 | "$p(\\theta) = \\mathcal{N}(\\theta|0,\\sigma^2I)$\n",
104 | "\n",
105 | "$q(\\theta) = \\mathcal{N}(\\theta|\\mu,D)$\n",
106 | "\n",
107 | "$D = \\mathrm{diag}\\left(\\left[e^{2d_1},e^{2d_2},\\dots,e^{2d_p}\\right]\\right)$\n",
108 | "\n",
109 | "$\\mathcal{F}(\\mu,D)=\\mathbb{E}_{q(\\theta)} \\left[ \\log p(y|x,\\theta) p(\\theta) - \\log q(\\theta) \\right]$\n",
110 | "\n",
111 | "then, with $\\theta = \\mu + D^{\\frac{1}{2}}\\epsilon$,\n",
112 | "\n",
113 | "$\\frac{\\partial \\mathcal{F}(\\mu,D)}{\\partial \\mu_i} = E_{\\mathcal{N}(\\epsilon|0,I)}\\left[ \\frac{\\partial \\log p(y|x,\\theta)}{\\partial \\theta_i} + \\frac{\\partial \\log p(\\theta)}{\\partial \\theta_i} - \\frac{\\partial \\log q(\\theta)}{\\partial \\theta_i} \\right]$\n",
114 | "\n",
115 | "$\\frac{\\partial \\mathcal{F}(\\mu,D)}{\\partial d_i} = E_{\\mathcal{N}(\\epsilon|0,I)}\\left[ \\left( \\frac{\\partial \\log p(y|x,\\theta)}{\\partial \\theta_i} + \\frac{\\partial \\log p(\\theta)}{\\partial \\theta_i} - \\frac{\\partial \\log q(\\theta)}{\\partial \\theta_i} \\right) e^{d_i} \\epsilon_i \\right]$"
116 | ]
117 | },
118 | {
119 | "cell_type": "markdown",
120 | "metadata": {},
121 | "source": [
122 | "### Solution\n",
123 | "In exercise 2 we showed that we could change from expectations over $q(\\theta)$ to expectations over $p(\\epsilon)$. For any variational parameter $\\phi$ (one of the $\\mu_i$ or $d_i$):\n",
124 | "\n",
125 | "$\\frac{\\partial}{\\partial\\phi}\\mathbb{E}_{q(\\theta)} \\left[ \\log p(y|x,\\theta) p(\\theta) - \\log q(\\theta) \\right]$\n",
126 | "\n",
127 | "$= \\frac{\\partial}{\\partial\\phi}\\mathbb{E}_{p(\\epsilon)} \\left[ \\log p(y|x,\\theta) p(\\theta) - \\log q(\\theta) \\right]$\n",
128 | "\n",
129 | "$= \\mathbb{E}_{p(\\epsilon)} \\left[ \\frac{\\partial}{\\partial\\phi}\\log p(y|x,\\theta) + \\frac{\\partial}{\\partial\\phi}\\log p(\\theta) - \\frac{\\partial}{\\partial\\phi}\\log q(\\theta) \\right]$\n",
130 | "\n",
131 | "Now we use the chain rule of derivatives, $\\frac{\\partial\\theta}{\\partial\\mu_i} = 1$ and $\\frac{\\partial\\theta}{\\partial d_i} = e^{d_i}\\epsilon_i$ (both read off from the reparameterization below), to show that:\n",
132 | "\n",
133 | "$\\theta = \\mu + D^{1/2}\\epsilon, \\qquad D^{1/2} = \\mathrm{diag}\\left(\\left[e^{d_1},e^{d_2},\\dots,e^{d_p}\\right]\\right)$\n",
134 | "\n",
135 | "$\\frac{\\partial \\mathcal{F}}{\\partial\\mu_i} = $ TODO: Apply the chain rule.\n",
136 | "\n",
137 | "$\\frac{\\partial \\mathcal{F}}{\\partial d_i} = $ TODO: Apply the chain rule."
138 | ]
139 | },
140 | {
141 | "cell_type": "markdown",
142 | "metadata": {},
143 | "source": [
144 | "## Exercise 4\n",
145 | "What is the fewest number of times backpropagation must be run to compute unbiased estimates of both $\\frac{\\partial \\mathcal{F}(\\mu,D)}{\\partial \\mu_i}$ and $\\frac{\\partial \\mathcal{F}(\\mu,D)}{\\partial d_i}$?"
146 | ]
147 | },
148 | {
149 | "cell_type": "markdown",
150 | "metadata": {},
151 | "source": [
152 | "### Solution\n",
153 | "How many samples do you need for a Monte Carlo estimate of an integral?"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": 10,
159 | "metadata": {
160 | "collapsed": true
161 | },
162 | "outputs": [],
163 | "source": [
164 | "import numpy as np\n",
165 | "import theano\n",
166 | "theano.config.floatX = 'float32'\n",
167 | "import theano.tensor as T\n",
168 | "from lasagne.layers.base import Layer\n",
169 | "from theano.tensor.shared_randomstreams import RandomStreams\n",
170 | "from lasagne.nonlinearities import rectify, identity, linear\n",
171 | "from lasagne.updates import adam, sgd\n",
172 | "from lasagne.layers import get_all_layers, get_output, get_all_params, InputLayer\n",
173 | "from lasagne import init\n",
174 | "import math"
175 | ]
176 | },
177 | {
178 | "cell_type": "markdown",
179 | "metadata": {},
180 | "source": [
181 | "## Exercise 5\n",
182 | "Generate a simple regression data set by sampling points from the curve:\n",
183 | "\n",
184 | "$y = x + 0.3\\sin(2\\pi(x+v)) + 0.3\\sin(4\\pi(x+v)) + v$\n",
185 | "\n",
186 | "$v\\sim\\mathcal{N}(0,0.02^2)$"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": 11,
192 | "metadata": {
193 | "collapsed": false
194 | },
195 | "outputs": [],
196 | "source": [
197 | "def _shared_dataset(data_xy, borrow=True):\n",
198 | "    \"\"\"\n",
199 | "    Share the dataset for use on the GPU.\n",
200 | "    :param data_xy: The data set containing a matrix x and y in a tuple.\n",
201 | "    :param borrow: Refer to the theano docs.\n",
202 | "    :return: The shared data set.\n",
203 | "    \"\"\"\n",
204 | "    data_x, data_y = data_xy\n",
205 | "    shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)\n",
206 | "    shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)\n",
207 | "    return shared_x, shared_y\n",
208 | "\n",
209 | "def generate_synthetic_data(dat_size=1.e4):\n",
210 | "    rng = np.random.RandomState(1234)\n",
211 | "    x = rng.uniform(0., 1., int(dat_size)).reshape((-1, 1))\n",
212 | "    v = rng.normal(0, 0.02, size=x.shape)\n",
213 | "    y = # TODO: Write y.\n",
214 | "    n_train = int(len(x) * 0.9)  # 90% for training, 10% for testing\n",
215 | "    train_x = x[:n_train]\n",
216 | "    train_y = y[:n_train]\n",
217 | "    test_x = x[n_train:]\n",
218 | "    test_y = y[n_train:]\n",
219 | "    return _shared_dataset((train_x, train_y)), _shared_dataset((test_x, test_y))\n",
220 | "\n",
221 | "(train_x, train_t), (test_x, test_t) = generate_synthetic_data(1e4)\n",
222 | "test_x_unshared = test_x.eval()\n",
223 | "test_t_unshared = test_t.eval()"
224 | ]
225 | },
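Aside: the `y = # TODO: Write y.` line above is deliberately left open for you to fill in; a completed version ships in `bayes by backprop_solution.ipynb`. For reference, one completion matching the curve stated in Exercise 5 would be the line below (an editorial sketch; only `np.pi` and `np.sin` are needed beyond the cell's existing imports):

# One possible completion of the TODO, following the Exercise 5 curve
y = x + 0.3 * np.sin(2 * np.pi * (x + v)) + 0.3 * np.sin(4 * np.pi * (x + v)) + v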
"cell_type": "markdown", 228 | "metadata": {}, 229 | "source": [ 230 | "##Exercise 6\n", 231 | "Construct a feed forward neural network that represents $p(y|x,\\theta)$ with architecture 1-50-50-1. Run it on the generated data and evaluate the performance." 232 | ] 233 | }, 234 | { 235 | "cell_type": "markdown", 236 | "metadata": {}, 237 | "source": [ 238 | "##Exercise 7\n", 239 | "Implement a network for Bayes by Backdrop $p(\\theta)$ and $q(\\theta)$.\n", 240 | "\n", 241 | "During training, estimate the loss function $\\mathcal{F}(\\mu,D)$ and likelihood $logp(y|x,\\theta)$." 242 | ] 243 | }, 244 | { 245 | "cell_type": "markdown", 246 | "metadata": {}, 247 | "source": [ 248 | "Following we'll write the functions for distribution density estimations." 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": 12, 254 | "metadata": { 255 | "collapsed": false 256 | }, 257 | "outputs": [], 258 | "source": [ 259 | "c = - 0.5 * math.log(2*math.pi)\n", 260 | "def normal(x, mean, sd):\n", 261 | " return c - T.log(T.abs_(sd)) - (x - mean)**2 / (2 * sd**2)\n", 262 | "\n", 263 | "def normal2(x, mean, logvar):\n", 264 | " return c - logvar/2 - (x - mean)**2 / (2 * T.exp(logvar))" 265 | ] 266 | }, 267 | { 268 | "cell_type": "markdown", 269 | "metadata": {}, 270 | "source": [ 271 | "Construct the lasagne layer." 272 | ] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": 13, 277 | "metadata": { 278 | "collapsed": true 279 | }, 280 | "outputs": [], 281 | "source": [ 282 | "class BayesBackpropLayer(Layer):\n", 283 | "\n", 284 | " def __init__(self, incoming, num_units, W=init.Normal(0.05), b=init.Normal(0.05), nonlinearity=rectify,\n", 285 | " prior_sd=T.exp(-3), **kwargs):\n", 286 | " super(BayesBackpropLayer, self).__init__(incoming, **kwargs)\n", 287 | " self._srng = RandomStreams()\n", 288 | "\n", 289 | " self.num_units = num_units\n", 290 | " self.num_inputs = int(np.prod(self.input_shape[1:]))\n", 291 | " self.nonlinearity = (identity if nonlinearity is None else nonlinearity)\n", 292 | " self.prior_sd = prior_sd\n", 293 | "\n", 294 | " self.W = T.zeros((self.num_inputs, num_units))\n", 295 | " self.W_mu = self.add_param(W, (self.num_inputs, num_units), name=\"W_mu\")\n", 296 | " self.W_logsigma = self.add_param(W, (self.num_inputs, num_units), name=\"W_sigma\")\n", 297 | " self.W_params = [self.W, self.W_mu, self.W_logsigma]\n", 298 | " self.b = T.zeros((num_units,))\n", 299 | " self.b_mu = self.add_param(b, (num_units,))\n", 300 | " self.b_logsigma = self.add_param(b, (num_units,))\n", 301 | " self.params = [self.W_mu, self.W_logsigma, self.b_mu, self.b_logsigma]\n", 302 | "\n", 303 | " def get_output_shape_for(self, input_shape):\n", 304 | " return (input_shape[0], self.num_units)\n", 305 | "\n", 306 | " def get_output_for(self, input, deterministic=False, **kwargs):\n", 307 | " if input.ndim > 2:\n", 308 | " # if the input has more than two dimensions, flatten it into a\n", 309 | " # batch of feature vectors.\n", 310 | " input = input.flatten(2)\n", 311 | "\n", 312 | " if deterministic:\n", 313 | " activation = T.dot(input, self.W_mu) + self.b_mu.dimshuffle('x', 0)\n", 314 | " else:\n", 315 | " W = self.get_W()\n", 316 | " b = self.get_b()\n", 317 | " activation = T.dot(input, W) + b.dimshuffle('x', 0)\n", 318 | " return self.nonlinearity(activation)\n", 319 | "\n", 320 | " def get_y_mu_sigma(self, x):\n", 321 | " layers = get_all_layers(self)\n", 322 | " # output from sampled weights of all layers-1.\n", 323 | " z = get_output(layers[-2], x, 
234 | {
235 | "cell_type": "markdown",
236 | "metadata": {},
237 | "source": [
238 | "## Exercise 7\n",
239 | "Implement a network for Bayes by Backprop with prior $p(\\theta)$ and approximate posterior $q(\\theta)$.\n",
240 | "\n",
241 | "During training, estimate the loss function $\\mathcal{F}(\\mu,D)$ and the likelihood $\\log p(y|x,\\theta)$."
242 | ]
243 | },
244 | {
245 | "cell_type": "markdown",
246 | "metadata": {},
247 | "source": [
248 | "Next we'll write the functions for evaluating the Gaussian log-densities we need."
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": 12,
254 | "metadata": {
255 | "collapsed": false
256 | },
257 | "outputs": [],
258 | "source": [
259 | "c = - 0.5 * math.log(2*math.pi)\n",
260 | "def normal(x, mean, sd):\n",
261 | "    return c - T.log(T.abs_(sd)) - (x - mean)**2 / (2 * sd**2)\n",
262 | "\n",
263 | "def normal2(x, mean, logvar):\n",
264 | "    return c - logvar/2 - (x - mean)**2 / (2 * T.exp(logvar))"
265 | ]
266 | },
267 | {
268 | "cell_type": "markdown",
269 | "metadata": {},
270 | "source": [
271 | "Construct the Lasagne layer."
272 | ]
273 | },
274 | {
275 | "cell_type": "code",
276 | "execution_count": 13,
277 | "metadata": {
278 | "collapsed": true
279 | },
280 | "outputs": [],
281 | "source": [
282 | "class BayesBackpropLayer(Layer):\n",
283 | "\n",
284 | "    def __init__(self, incoming, num_units, W=init.Normal(0.05), b=init.Normal(0.05), nonlinearity=rectify,\n",
285 | "                 prior_sd=T.exp(-3), **kwargs):\n",
286 | "        super(BayesBackpropLayer, self).__init__(incoming, **kwargs)\n",
287 | "        self._srng = RandomStreams()\n",
288 | "\n",
289 | "        self.num_units = num_units\n",
290 | "        self.num_inputs = int(np.prod(self.input_shape[1:]))\n",
291 | "        self.nonlinearity = (identity if nonlinearity is None else nonlinearity)\n",
292 | "        self.prior_sd = prior_sd\n",
293 | "\n",
294 | "        self.W = T.zeros((self.num_inputs, num_units))\n",
295 | "        self.W_mu = self.add_param(W, (self.num_inputs, num_units), name=\"W_mu\")\n",
296 | "        self.W_logsigma = self.add_param(W, (self.num_inputs, num_units), name=\"W_logsigma\")\n",
297 | "        self.W_params = [self.W, self.W_mu, self.W_logsigma]\n",
298 | "        self.b = T.zeros((num_units,))\n",
299 | "        self.b_mu = self.add_param(b, (num_units,))\n",
300 | "        self.b_logsigma = self.add_param(b, (num_units,))\n",
301 | "        self.params = [self.W_mu, self.W_logsigma, self.b_mu, self.b_logsigma]\n",
302 | "\n",
303 | "    def get_output_shape_for(self, input_shape):\n",
304 | "        return (input_shape[0], self.num_units)\n",
305 | "\n",
306 | "    def get_output_for(self, input, deterministic=False, **kwargs):\n",
307 | "        if input.ndim > 2:\n",
308 | "            # if the input has more than two dimensions, flatten it into a\n",
309 | "            # batch of feature vectors.\n",
310 | "            input = input.flatten(2)\n",
311 | "\n",
312 | "        if deterministic:\n",
313 | "            activation = T.dot(input, self.W_mu) + self.b_mu.dimshuffle('x', 0)\n",
314 | "        else:\n",
315 | "            W = self.get_W()\n",
316 | "            b = self.get_b()\n",
317 | "            activation = T.dot(input, W) + b.dimshuffle('x', 0)\n",
318 | "        return self.nonlinearity(activation)\n",
319 | "\n",
320 | "    def get_y_mu_sigma(self, x):\n",
321 | "        layers = get_all_layers(self)\n",
322 | "        # output from sampled weights of all layers but the last.\n",
323 | "        z = get_output(layers[-2], x, deterministic=False)\n",
324 | "        # sampled output of the final layer.\n",
325 | "        y = self.nonlinearity(T.dot(z, self.get_W()) + self.get_b().dimshuffle('x', 0))\n",
326 | "        # mean output of the final layer.\n",
327 | "        y_mu = self.nonlinearity(T.dot(z, self.W_mu) + self.b_mu.dimshuffle('x', 0))\n",
328 | "        # logsigma output of the final layer.\n",
329 | "        y_logsigma = self.nonlinearity(T.dot(z, self.W_logsigma) + self.b_logsigma.dimshuffle('x', 0))\n",
330 | "        return y, y_mu, y_logsigma\n",
331 | "\n",
332 | "    def get_log_distributions(self, x, t, n_samples=1):\n",
333 | "        # TODO: calculate the log distributions.\n",
334 | "        def one_sample(_x, _t):\n",
335 | "            y, y_mu, y_logsigma = self.get_y_mu_sigma(_x)\n",
336 | "            # log P(D|w)\n",
337 | "            _log_pd_given_w = normal2(_t, y, T.log(self.prior_sd ** 2)).sum()\n",
338 | "            # log q(w) and log p(w)\n",
339 | "            _log_qw, _log_pw = 0., 0.\n",
340 | "            layers = get_all_layers(self)[1:]\n",
341 | "            for layer in layers:\n",
342 | "                W = layer.W\n",
343 | "                b = layer.b\n",
344 | "                _log_qw += normal2(W, layer.W_mu, layer.W_logsigma * 2).sum()\n",
345 | "                _log_qw += normal2(b, layer.b_mu, layer.b_logsigma * 2).sum()\n",
346 | "                _log_pw += normal(W, 0., self.prior_sd).sum()\n",
347 | "                _log_pw += normal(b, 0., self.prior_sd).sum()\n",
348 | "            return _log_qw, _log_pw, _log_pd_given_w\n",
349 | "\n",
350 | "        log_qw, log_pw, log_pd_given_w = 0., 0., 0.\n",
351 | "        for i in range(n_samples):\n",
352 | "            log_qw_tmp, log_pw_tmp, log_pd_given_w_tmp = one_sample(x, t)\n",
353 | "            log_qw += log_qw_tmp\n",
354 | "            log_pw += log_pw_tmp\n",
355 | "            log_pd_given_w += log_pd_given_w_tmp\n",
356 | "\n",
357 | "        log_qw /= n_samples\n",
358 | "        log_pw /= n_samples\n",
359 | "        log_pd_given_w /= n_samples\n",
360 | "        return log_qw, log_pw, log_pd_given_w\n",
361 | "\n",
362 | "    def get_params(self):\n",
363 | "        return self.params\n",
364 | "\n",
365 | "    def get_W(self):\n",
366 | "        # TODO: Sample the weights and return.\n",
367 | "        W = T.zeros(self.W_mu.shape)\n",
368 | "        eps = # Sample the epsilon.\n",
369 | "        W += # Recall the reparameterization trick.\n",
370 | "        self.W = W\n",
371 | "        return W\n",
372 | "\n",
373 | "    def get_b(self):\n",
374 | "        # TODO: Sample the bias and return.\n",
375 | "        b = T.zeros(self.b_mu.shape)\n",
376 | "        eps = # Sample the epsilon.\n",
377 | "        b += # Recall the reparameterization trick.\n",
378 | "        self.b = b\n",
379 | "        return b"
380 | ]
381 | },
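Aside: the `get_W`/`get_b` stubs above are the exercise; the course's own completion is in `bayes by backprop_solution.ipynb`. One possible completion, following the reparameterization trick from Exercise 2 ($\theta = \mu + \sigma\epsilon$ with $\sigma = e^{\text{logsigma}}$), is sketched below. It assumes that `RandomStreams.normal` accepts a symbolic shape, which holds for Theano's `shared_randomstreams`.

# Inside BayesBackpropLayer -- an editorial sketch of one possible completion:
def get_W(self):
    # eps ~ N(0, I), then W = mu + sigma * eps with sigma = exp(W_logsigma)
    eps = self._srng.normal(self.W_mu.shape, avg=0.0, std=1.0)
    W = self.W_mu + T.exp(self.W_logsigma) * eps
    self.W = W
    return W

def get_b(self):
    eps = self._srng.normal(self.b_mu.shape, avg=0.0, std=1.0)
    b = self.b_mu + T.exp(self.b_logsigma) * eps
    self.b = b
    return b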
382 | {
383 | "cell_type": "markdown",
384 | "metadata": {},
385 | "source": [
386 | "Define the model class."
387 | ]
388 | },
389 | {
390 | "cell_type": "code",
391 | "execution_count": 14,
392 | "metadata": {
393 | "collapsed": true
394 | },
395 | "outputs": [],
396 | "source": [
397 | "class VNN:\n",
398 | "    def __init__(self, n_in, n_hidden, n_out, trans_func=rectify, out_func=linear, W=init.Normal(0.05),\n",
399 | "                 b=init.Normal(0.05), batch_size=100, n_samples=10, prior_sd=T.exp(-3)):\n",
400 | "        self.n_in = n_in\n",
401 | "        self.n_hidden = n_hidden\n",
402 | "        self.n_out = n_out\n",
403 | "        self.batch_size = batch_size\n",
404 | "        self.transf = trans_func\n",
405 | "        self.outf = out_func\n",
406 | "        self.n_samples = n_samples\n",
407 | "        self.l_in = InputLayer(shape=(batch_size,n_in))\n",
408 | "\n",
409 | "        # Define the model\n",
410 | "        l_prev = self.l_in\n",
411 | "        for n_hid in n_hidden:\n",
412 | "            l_tmp = BayesBackpropLayer(l_prev, num_units=n_hid, W=W, b=b, nonlinearity=self.transf, prior_sd=prior_sd)\n",
413 | "            l_prev = l_tmp\n",
414 | "        self.model = BayesBackpropLayer(l_prev, num_units=n_out, nonlinearity=self.outf)\n",
415 | "        self.x = T.matrix('x')\n",
416 | "        self.t = T.matrix('t')\n",
417 | "\n",
418 | "    def build_model(self, train_x, train_t, test_x, test_t, update, update_args):\n",
419 | "        self.train_x = train_x\n",
420 | "        self.train_t = train_t\n",
421 | "        self.test_x = test_x\n",
422 | "        self.test_t = test_t\n",
423 | "        self.update = update\n",
424 | "        self.update_args = update_args\n",
425 | "        self.index = T.iscalar('index')\n",
426 | "        self.batch_slice = slice(self.index * self.batch_size, (self.index + 1) * self.batch_size)\n",
427 | "\n",
428 | "        log_qw, log_pw, log_pd_given_w = self.model.get_log_distributions(self.x, self.t, self.n_samples)\n",
429 | "\n",
430 | "        n_tot = self.train_x.shape[0].astype(theano.config.floatX)\n",
431 | "        n_batches = n_tot / self.batch_size\n",
432 | "\n",
433 | "        # TODO: Calculate the loss function. Remember to scale the weight difference by the batchsize.\n",
434 | "        loss = ???\n",
435 | "\n",
436 | "        all_params = get_all_params(self.model)\n",
437 | "        updates = self.update(loss, all_params, *self.update_args)\n",
438 | "\n",
439 | "        train_model = theano.function(\n",
440 | "            [self.index], loss,\n",
441 | "            updates=updates,\n",
442 | "            givens={\n",
443 | "                self.x: self.train_x[self.batch_slice],\n",
444 | "                self.t: self.train_t[self.batch_slice],\n",
445 | "            },\n",
446 | "        )\n",
447 | "\n",
448 | "        test_model = theano.function(\n",
449 | "            [self.index], loss,\n",
450 | "            givens={\n",
451 | "                self.x: self.test_x[self.batch_slice],\n",
452 | "                self.t: self.test_t[self.batch_slice],\n",
453 | "            },\n",
454 | "        )\n",
455 | "\n",
456 | "        return train_model, test_model\n",
457 | "\n",
458 | "    def get_output(self, dat, deterministic=False):\n",
459 | "        return get_output(self.model, dat, deterministic=deterministic)"
460 | ]
461 | },
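Aside: the `loss = ???` placeholder in `build_model` above is also part of the exercise. One common choice, following the minibatch scaling in Blundell et al. (2015) and the hint in the comment, is sketched below as an assumption rather than the official solution; the weight terms are paid once per epoch, so each of the `n_batches` minibatches carries a `1/n_batches` share, and dividing by `batch_size` merely rescales to a per-example loss.

# Editorial sketch (an assumption, not the official course solution):
# F_i = (log q(w) - log p(w)) / n_batches - log p(D_i | w), per example.
loss = ((log_qw - log_pw) / n_batches - log_pd_given_w) / self.batch_size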
462 | {
463 | "cell_type": "markdown",
464 | "metadata": {},
465 | "source": [
466 | "Run the network. Try sampling the weights multiple times and evaluate the differences."
467 | ]
468 | },
469 | {
470 | "cell_type": "code",
471 | "execution_count": null,
472 | "metadata": {
473 | "collapsed": false
474 | },
475 | "outputs": [],
476 | "source": [
477 | "import time\n",
478 | "batch_size = 100\n",
479 | "n_epochs = 100\n",
480 | "eval_train = {}\n",
481 | "eval_test = {}\n",
482 | "\n",
483 | "model = VNN(1, [50, 50], 1, trans_func=rectify, out_func=linear, batch_size=batch_size, n_samples=5)\n",
484 | "train_model, test_model = model.build_model(train_x, train_t, test_x, test_t, adam, update_args=(0.001,))\n",
485 | "n_train_batches = train_x.get_value(borrow=True).shape[0] / batch_size\n",
486 | "n_test_batches = test_x.get_value(borrow=True).shape[0] / batch_size\n",
487 | "\n",
488 | "\n",
489 | "# train for n_epochs, logging the mean train/test loss per epoch (1-indexed)\n",
490 | "for epoch in range(1, n_epochs + 1):\n",
491 | "    start_time = time.time()\n",
492 | "    avg_costs = []\n",
493 | "    # one full pass over the training minibatches\n",
494 | "    for minibatch_index in xrange(n_train_batches):\n",
495 | "        minibatch_avg_cost = train_model(minibatch_index)\n",
496 | "        avg_costs.append(minibatch_avg_cost)\n",
497 | "    eval_train[epoch] = np.mean(avg_costs)\n",
498 | "\n",
499 | "    test_losses = [test_model(i) for i in xrange(n_test_batches)]\n",
500 | "    eval_test[epoch] = np.mean(test_losses)\n",
501 | "\n",
502 | "\n",
503 | "    end_time = time.time() - start_time\n",
504 | "    print \"[epoch,time,train,test];%i;%.2f;%.10f;%.10f\" % (epoch, end_time, eval_train[epoch], eval_test[epoch])"
505 | ]
506 | },
507 | {
508 | "cell_type": "code",
509 | "execution_count": 41,
510 | "metadata": {
511 | "collapsed": false
512 | },
513 | "outputs": [
514 | {
515 | "data": {
516 | "image/png": "[base64 PNG omitted: line plot of training and test loss versus epochs]",
517 | "text/plain": [
518 | ""
519 | ]
520 | },
521 | "metadata": {},
522 | "output_type": "display_data"
523 | },
524 | {
525 | "data": {
526 | "image/png": "[base64 PNG omitted: scatter of test targets t and sampled predictions y over the test inputs x]",
527 | "text/plain": [
528 | ""
529 | ]
530 | },
531 | "metadata": {},
532 | "output_type": "display_data"
533 | }
534 | ],
535 | "source": [
536 | "import math\n",
537 | "import matplotlib.pyplot as plt\n",
538 | "import matplotlib\n",
539 | "%matplotlib inline\n",
540 | "import time\n",
541 | "\n",
542 | "fig = plt.figure()\n",
543 | "plt.plot(eval_train.keys(), eval_train.values(), label='training data')\n",
544 | "plt.plot(eval_test.keys(), eval_test.values(), label='testing data')\n",
545 | "plt.legend()\n",
546 | "plt.ylabel(\"Loss\")\n",
547 | "plt.xlabel('Epochs')\n",
548 | "plt.show()\n",
549 | "\n",
550 | "y = model.get_output(test_x, deterministic=False).eval()\n",
551 | "fig = plt.figure()\n",
552 | "plt.scatter(np.array(test_x_unshared), np.array(test_t_unshared), label=\"t\", color=(1.0,0,0,0.2))\n",
553 | "plt.scatter(np.array(test_x_unshared), np.array(y), label=\"y\", color=(0,0.7,0,0.1))\n",
554 | "plt.legend()\n",
555 | "plt.show()"
556 | ]
557 | }
558 | ],
559 | "metadata": {
560 | "kernelspec": {
561 | "display_name": "Python 2",
562 | "language": "python",
563 | "name": "python2"
564 | },
565 | "language_info": {
566 | "codemirror_mode": {
567 | "name":
"ipython", 568 | "version": 2 569 | }, 570 | "file_extension": ".py", 571 | "mimetype": "text/x-python", 572 | "name": "python", 573 | "nbconvert_exporter": "python", 574 | "pygments_lexer": "ipython2", 575 | "version": "2.7.10" 576 | } 577 | }, 578 | "nbformat": 4, 579 | "nbformat_minor": 0 580 | } 581 | -------------------------------------------------------------------------------- /day4-VAE/mnist.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day4-VAE/mnist.npz -------------------------------------------------------------------------------- /day5-ladder/mnist.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day5-ladder/mnist.npz -------------------------------------------------------------------------------- /day5-ladder/prob_to_denoising.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepLearningDTU/Summerschool_2015/972cddcb517b873d10f3adde2e7a14f61f69219e/day5-ladder/prob_to_denoising.pdf --------------------------------------------------------------------------------