├── .gitignore
├── Convolutional-NeuralNet
├── code
│ └── tf_cnn_mnist_classification.py
└── tf_cnn_mnist_classification.ipynb
├── Deconvolutional-Autoencoder
├── code
│ └── tf_dcae_mnist_reconstruction.py
└── tf_dcae_mnist_reconstruction.ipynb
├── Denoising-Autoencoder
├── code
│ └── tf_dae_mnist_reconstruction.py
└── tf_dae_mnist_reconstruction.ipynb
├── Feedforward-NeuralNet
├── code
│ ├── np_nn_iris_classification.py
│ └── tf_nn_iris_classification.py
├── np_nn_iris_classification.ipynb
└── tf_nn_iris_classification.ipynb
├── Hopfield-Network
├── code
│ └── np_hnn_reconstruction.py
└── np_hnn_reconstruction.ipynb
├── README.md
├── Recurrent-NeuralNet
├── code
│ ├── np_rnn_addition.py
│ ├── tf_lstm_climate_timeseries.py
│ └── tf_rnn_addition.py
├── datasets
│ └── co2-ppm-mauna-loa-19651980.csv
├── np_rnn_addition.ipynb
├── tf_lstm_climate_timeseries.ipynb
└── tf_rnn_addition.ipynb
└── Restricted-Boltzmann-Machine
├── code
└── tf_rbm_mnist_reconstruction.py
└── tf_rbm_mnist_reconstruction.ipynb
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
 46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # SageMath parsed files
80 | *.sage.py
81 |
82 | # dotenv
83 | .env
84 |
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 |
93 | # Rope project settings
94 | .ropeproject
95 |
96 | # Build files generated by Intellij
97 | .idea
98 |
99 | # Datasets Folder
100 | MNIST_data
101 |
102 | # Temporary folder
103 | tmp
104 | temp
105 |
106 | # Hide all hidden files
107 | .*
108 | !/.gitignore
109 |
--------------------------------------------------------------------------------
/Convolutional-NeuralNet/code/tf_cnn_mnist_classification.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import tensorflow as tf
4 | from tensorflow.examples.tutorials.mnist import input_data
5 |
# load the MNIST dataset from tensorflow.examples
# (downloads into MNIST_data/ on first run; one_hot=True makes each label
# a 10-element one-hot vector)
mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Neural Network
#
# Architecture: [conv 5x5 -> ReLU -> max-pool 2x2] x 2, followed by one
# fully connected layer producing the 10 class scores.

# Hyper-parameters
n_image_width = 28
n_image_height = 28
n_input_pixels = n_image_height * n_image_width
filter_width = 5
filter_height = 5
n_classes = 10  # digits 0-9
n_channels = 1  # grayscale images, single channel

con_1_features = 16  # feature maps produced by conv layer 1
con_2_features = 32  # feature maps produced by conv layer 2

learning_rate = 0.001

batch_size = 50

# Input/Output Placeholders (one row per example; fed at session run time)
X = tf.placeholder(dtype=tf.float32, shape=[None, n_input_pixels])
Y = tf.placeholder(dtype=tf.float32, shape=[None, n_classes])

# Layer Weights and biases, all initialised from N(0, 0.1)
conv_lay_1 = {
    'weight':
    tf.Variable(
        tf.random_normal(
            [filter_height, filter_width, n_channels, con_1_features],
            stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([con_1_features], stddev=0.1))
}
conv_lay_2 = {
    'weight':
    tf.Variable(
        tf.random_normal(
            [filter_height, filter_width, con_1_features, con_2_features],
            stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([con_2_features], stddev=0.1))
}
# After two 2x2 max-pools the 28x28 input is reduced to 7x7 feature maps.
fc_nn_lay_1 = {
    'weight':
    tf.Variable(
        tf.random_normal(
            [7 * 7 * con_2_features, n_classes],
            stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([n_classes], stddev=0.1))
}

# Model

# Resize image to proper shape
x_img = tf.reshape(X, [-1, n_image_width, n_image_height,
                       n_channels])  # [batch, height, width, channels]

# conv layer 1: 28x28x1 -> 28x28x16, pooled down to 14x14x16
h_conv_1 = tf.nn.conv2d(
    x_img, conv_lay_1['weight'], strides=[1, 1, 1, 1], padding='SAME')
h_relu_1 = tf.nn.relu(h_conv_1 + conv_lay_1['bias'])
op_pool_1 = tf.nn.max_pool(
    h_relu_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# conv layer 2: 14x14x16 -> 14x14x32, pooled down to 7x7x32
h_conv_2 = tf.nn.conv2d(
    op_pool_1, conv_lay_2['weight'], strides=[1, 1, 1, 1], padding='SAME')
h_relu_2 = tf.nn.relu(h_conv_2 + conv_lay_2['bias'])
op_pool_2 = tf.nn.max_pool(
    h_relu_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# flatten the feature maps and apply the fully connected classifier
flat_lay_3 = tf.reshape(op_pool_2, [-1, 7 * 7 * con_2_features])

h_nn_1 = tf.matmul(flat_lay_3, fc_nn_lay_1['weight']) + fc_nn_lay_1['bias']
# NOTE(review): sigmoid + mean-squared error is an unusual pairing for
# one-hot classification; softmax with cross-entropy would normally train
# better — confirm before changing, since it alters training behavior.
final_op = tf.nn.sigmoid(h_nn_1)

# Error and Optimizer

# mean-squared error
error = tf.reduce_mean(0.5 * tf.square(final_op - Y))

# adam-optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(error)

# Prediction for test: predicted class = argmax of the 10 output scores
correct_pred = tf.equal(tf.argmax(final_op, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Start Session
with tf.Session() as sess:
    tf.global_variables_initializer().run()

    print("*********** Train ***********")

    train_examples = len(mnist_data.train.images)

    # one pass over the training set, batch_size examples at a time
    for i in range(train_examples // batch_size):
        train_batch = mnist_data.train.next_batch(batch_size)
        _, err = sess.run(
            [optimizer, error],
            feed_dict={X: train_batch[0],
                       Y: train_batch[1]})

        # every 100 batches, report accuracy on a held-out validation batch
        if i % 100 == 0:
            validation_batch = mnist_data.validation.next_batch(batch_size)
            acc = accuracy.eval({
                X: validation_batch[0],
                Y: validation_batch[1]
            })
            # NOTE(review): 'err' printed below is the last training-batch
            # error, not a validation error, despite the message label.
            print("Batch: %d validation-error = %f accuracy = %f" %
                  (i, err, acc * 100))

    print("*********** Test ***********")

    # final accuracy over the entire held-out test set
    acc = accuracy.eval({X: mnist_data.test.images, Y: mnist_data.test.labels})
    print("Final Accuracy = %f" % (acc * 100))
123 |
--------------------------------------------------------------------------------
/Convolutional-NeuralNet/tf_cnn_mnist_classification.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Convolutional Neural Network\n",
8 | "###### MNIST classification"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "Import dependencies"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 1,
21 | "metadata": {
22 | "collapsed": true
23 | },
24 | "outputs": [],
25 | "source": [
26 | "import tensorflow as tf\n",
27 | "from tensorflow.examples.tutorials.mnist import input_data"
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "metadata": {},
33 | "source": [
 34 |     "load the MNIST dataset from tensorflow.examples"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 2,
40 | "metadata": {},
41 | "outputs": [
42 | {
43 | "name": "stdout",
44 | "output_type": "stream",
45 | "text": [
46 | "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
47 | "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
48 | "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
49 | "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
50 | ]
51 | }
52 | ],
53 | "source": [
54 | "mnist_data = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)"
55 | ]
56 | },
57 | {
58 | "cell_type": "markdown",
59 | "metadata": {},
60 | "source": [
61 | "### Neural Network"
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "metadata": {},
67 | "source": [
 68 |     "<br>Hyper-parameters"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 3,
74 | "metadata": {
75 | "collapsed": true
76 | },
77 | "outputs": [],
78 | "source": [
79 | "n_image_width = 28\n",
80 | "n_image_height = 28\n",
81 | "n_input_pixels = n_image_height * n_image_width\n",
82 | "filter_width = 5\n",
83 | "filter_height = 5\n",
84 | "n_classes = 10 # digits 0-9\n",
85 | "n_channels = 1 # black\n",
86 | "\n",
87 | "con_1_features = 16\n",
88 | "con_2_features = 32\n",
89 | "\n",
90 | "learning_rate = 0.001\n",
91 | "\n",
92 | "batch_size = 50"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "metadata": {},
98 | "source": [
99 | "Input/Output Placeholders"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": 4,
105 | "metadata": {
106 | "collapsed": true
107 | },
108 | "outputs": [],
109 | "source": [
110 | "X = tf.placeholder(dtype=tf.float32, shape=[None, n_input_pixels])\n",
111 | "Y = tf.placeholder(dtype=tf.float32, shape=[None, n_classes])"
112 | ]
113 | },
114 | {
115 | "cell_type": "markdown",
116 | "metadata": {},
117 | "source": [
118 | "Layer Weights and biases"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": 5,
124 | "metadata": {
125 | "collapsed": true
126 | },
127 | "outputs": [],
128 | "source": [
129 | "conv_lay_1 = {\n",
130 | " 'weight': tf.Variable(\n",
131 | " tf.random_normal([filter_height, filter_width, n_channels, con_1_features], stddev=0.1)),\n",
132 | " 'bias': tf.Variable(tf.random_normal([con_1_features], stddev=0.1))\n",
133 | "}\n",
134 | "conv_lay_2 = {\n",
135 | " 'weight': tf.Variable(\n",
136 | " tf.random_normal([filter_height, filter_width, con_1_features, con_2_features], stddev=0.1)),\n",
137 | " 'bias': tf.Variable(tf.random_normal([con_2_features], stddev=0.1))\n",
138 | "}\n",
139 | "fc_nn_lay_1 = {\n",
140 | " 'weight': tf.Variable(\n",
141 | " tf.random_normal([7 * 7 * con_2_features, n_classes], stddev=0.1)),\n",
142 | " 'bias': tf.Variable(tf.random_normal([n_classes], stddev=0.1))\n",
143 | "}"
144 | ]
145 | },
146 | {
147 | "cell_type": "markdown",
148 | "metadata": {},
149 | "source": [
150 | "#### Model"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": 6,
156 | "metadata": {
157 | "collapsed": true
158 | },
159 | "outputs": [],
160 | "source": [
161 | "# Resize image to proper shape\n",
162 | "x_img = tf.reshape(X, [-1, n_image_width, n_image_height,\n",
163 | " n_channels]) # [batch, height, width, channels]\n",
164 | "\n",
165 | "h_conv_1 = tf.nn.conv2d(x_img, conv_lay_1['weight'], strides=[1, 1, 1, 1], padding='SAME')\n",
166 | "h_relu_1 = tf.nn.relu(h_conv_1 + conv_lay_1['bias'])\n",
167 | "op_pool_1 = tf.nn.max_pool(h_relu_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n",
168 | "\n",
169 | "h_conv_2 = tf.nn.conv2d(op_pool_1, conv_lay_2['weight'], strides=[1, 1, 1, 1], padding='SAME')\n",
170 | "h_relu_2 = tf.nn.relu(h_conv_2 + conv_lay_2['bias'])\n",
171 | "op_pool_2 = tf.nn.max_pool(h_relu_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n",
172 | "\n",
173 | "flat_lay_3 = tf.reshape(op_pool_2, [-1, 7 * 7 * con_2_features])\n",
174 | "\n",
175 | "h_nn_1 = tf.matmul(flat_lay_3, fc_nn_lay_1['weight']) + fc_nn_lay_1['bias']\n",
176 | "final_op = tf.nn.sigmoid(h_nn_1)"
177 | ]
178 | },
179 | {
180 | "cell_type": "markdown",
181 | "metadata": {},
182 | "source": [
183 | "#### Error and Optimizer"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": 7,
189 | "metadata": {
190 | "collapsed": true
191 | },
192 | "outputs": [],
193 | "source": [
194 | "# mean-squared error\n",
195 | "error = tf.reduce_mean(0.5 * tf.square(final_op - Y))\n",
196 | "\n",
197 | "# adam-optimizer\n",
198 | "optimizer = tf.train.AdamOptimizer(learning_rate).minimize(error)"
199 | ]
200 | },
201 | {
202 | "cell_type": "markdown",
203 | "metadata": {},
204 | "source": [
205 | "##### Prediction for test"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": 8,
211 | "metadata": {
212 | "collapsed": true
213 | },
214 | "outputs": [],
215 | "source": [
216 | "correct_pred = tf.equal(tf.argmax(final_op, 1), tf.argmax(Y, 1))\n",
217 | "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))"
218 | ]
219 | },
220 | {
221 | "cell_type": "markdown",
222 | "metadata": {},
223 | "source": [
224 | "#### Start Session"
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": 9,
230 | "metadata": {},
231 | "outputs": [
232 | {
233 | "name": "stdout",
234 | "output_type": "stream",
235 | "text": [
236 | "*********** Train ***********\n",
237 | "Batch: 0 validation-error = 0.208108 accuracy = 6.000000\n",
238 | "Batch: 100 validation-error = 0.027116 accuracy = 69.999999\n",
239 | "Batch: 200 validation-error = 0.018338 accuracy = 72.000003\n",
240 | "Batch: 300 validation-error = 0.021567 accuracy = 68.000001\n",
241 | "Batch: 400 validation-error = 0.010519 accuracy = 86.000001\n",
242 | "Batch: 500 validation-error = 0.010969 accuracy = 83.999997\n",
243 | "Batch: 600 validation-error = 0.008034 accuracy = 81.999999\n",
244 | "Batch: 700 validation-error = 0.004340 accuracy = 98.000002\n",
245 | "Batch: 800 validation-error = 0.007647 accuracy = 94.000000\n",
246 | "Batch: 900 validation-error = 0.001807 accuracy = 98.000002\n",
247 | "Batch: 1000 validation-error = 0.003638 accuracy = 94.000000\n",
248 | "*********** Test ***********\n",
249 | "Final Accuracy = 96.820003\n"
250 | ]
251 | }
252 | ],
253 | "source": [
254 | "with tf.Session() as sess:\n",
255 | " tf.global_variables_initializer().run()\n",
256 | "\n",
257 | " print(\"*********** Train ***********\")\n",
258 | "\n",
259 | " train_examples = len(mnist_data.train.images)\n",
260 | "\n",
261 | " for i in range(train_examples // batch_size):\n",
262 | " train_batch = mnist_data.train.next_batch(batch_size)\n",
263 | " _, err = sess.run([optimizer, error], feed_dict={X: train_batch[0], Y: train_batch[1]})\n",
264 | "\n",
265 | " if i % 100 == 0:\n",
266 | " validation_batch = mnist_data.validation.next_batch(batch_size)\n",
267 | " acc = accuracy.eval({X: validation_batch[0], Y: validation_batch[1]})\n",
268 | " print(\"Batch: %d validation-error = %f accuracy = %f\" % (i, err, acc * 100))\n",
269 | "\n",
270 | " print(\"*********** Test ***********\")\n",
271 | "\n",
272 | " acc = accuracy.eval({X: mnist_data.test.images, Y: mnist_data.test.labels})\n",
273 | " print(\"Final Accuracy = %f\" % (acc * 100))"
274 | ]
275 | }
276 | ],
277 | "metadata": {
278 | "kernelspec": {
279 | "display_name": "Python 3",
280 | "language": "python",
281 | "name": "python3"
282 | },
283 | "language_info": {
284 | "codemirror_mode": {
285 | "name": "ipython",
286 | "version": 3
287 | },
288 | "file_extension": ".py",
289 | "mimetype": "text/x-python",
290 | "name": "python",
291 | "nbconvert_exporter": "python",
292 | "pygments_lexer": "ipython3",
293 | "version": "3.6.0"
294 | }
295 | },
296 | "nbformat": 4,
297 | "nbformat_minor": 2
298 | }
299 |
--------------------------------------------------------------------------------
/Deconvolutional-Autoencoder/code/tf_dcae_mnist_reconstruction.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 | from tensorflow.examples.tutorials.mnist import input_data
6 | import matplotlib.pyplot as plt
7 |
8 | # Util functions
9 |
10 |
# function to plot real/reconstructed image pairs during the testing phase
def plot_images(images, title, no_i_x, no_i_y=2):
    """Draw a grid of 28x28 images with "Real"/"Reconstructed" column titles.

    Args:
        images: sequence of arrays reshapable to 28x28; the subplot at
            row i, column j shows images[no_i_x * j + i].
        title: window title for the figure.
        no_i_x: number of rows in the grid.
        no_i_y: number of columns (default 2: real vs. reconstructed).
    """
    fig = plt.figure(figsize=(5, 15))
    # FigureCanvas.set_window_title was deprecated in Matplotlib 3.4 and
    # removed in 3.6; the manager method works on both old and new versions.
    fig.canvas.manager.set_window_title(title)
    images = np.array(images).reshape(-1, 28, 28)
    for i in range(no_i_x):
        for j in range(no_i_y):
            ax = fig.add_subplot(no_i_x, no_i_y, no_i_x * j + (i + 1))
            ax.matshow(images[no_i_x * j + i], cmap="gray")
            # hide axis ticks: these are images, not plots
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))

            # label only the first two axes of the grid
            if j == 0 and i == 0:
                ax.set_title("Real")
            elif j == 0 and i == 1:
                ax.set_title("Reconstructed")
27 |
28 |
# load the MNIST dataset from tensorflow.examples
# (downloads into MNIST_data/ on first run; the labels are not used — the
# autoencoder's reconstruction target is the input image itself)
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Neural Network Model
# Encoder = 2 convolutional layers + 1 fully connected layer
# Decoder = 2 de-convolutional layers + 1 fully connected layer

# Hyper-parameters
n_image_width = 28
n_image_height = 28
n_input_pixels = n_image_height * n_image_width
filter_width = 5
filter_height = 5
n_channels = 1  # grayscale images, single channel

conv_1_features = 16
conv_2_features = 32

final_hid_lay = 196  # size of the bottleneck (latent) representation

learning_rate = 0.001
# NOTE(review): batch_size is baked into the conv2d_transpose output_shape
# arguments below, so this graph only accepts batches of exactly this size.
batch_size = 20

images_to_plot = 10

# input placeholder
X = tf.placeholder(tf.float32, [None, n_input_pixels])

# Layer Weights and biases, all initialised from N(0, 0.1)
conv_lay_1 = {
    'weight':
    tf.Variable(
        tf.random_normal(
            [filter_height, filter_width, n_channels, conv_1_features],
            stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([conv_1_features], stddev=0.1))
}
conv_lay_2 = {
    'weight':
    tf.Variable(
        tf.random_normal(
            [filter_height, filter_width, conv_1_features, conv_2_features],
            stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([conv_2_features], stddev=0.1))
}
# After two stride-2 convolutions the 28x28 input is reduced to 7x7.
fc_enc_lay_1 = {
    'weight':
    tf.Variable(
        tf.random_normal(
            [7 * 7 * conv_2_features, final_hid_lay],
            stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([final_hid_lay], stddev=0.1))
}

# Decoder weights. conv2d_transpose filters are shaped
# [height, width, output_channels, input_channels], hence the reversed
# channel order relative to the encoder's conv filters.
deconv_lay_1 = {
    'weight':
    tf.Variable(
        tf.random_normal(
            [filter_height, filter_width, conv_1_features, conv_2_features],
            stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([conv_1_features], stddev=0.1))
}
deconv_lay_2 = {
    'weight':
    tf.Variable(
        tf.random_normal(
            [filter_height, filter_width, n_channels, conv_1_features],
            stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([n_channels], stddev=0.1))
}
fc_dec_lay_2 = {
    'weight':
    tf.Variable(
        tf.random_normal(
            [final_hid_lay, 7 * 7 * conv_2_features],
            stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([7 * 7 * conv_2_features], stddev=0.1))
}

# Encoder -> 2 convolutional layers (without max-pooling)
# Resize image to proper shape
x_img = tf.reshape(X, [-1, n_image_width, n_image_height,
                       n_channels])  # [batch, height, width, channels]

# stride-2 convolutions halve the spatial size: 28x28 -> 14x14 -> 7x7
conv_1 = tf.nn.conv2d(
    x_img, conv_lay_1['weight'], strides=[1, 2, 2, 1], padding='SAME')
conv_1_op = tf.nn.relu(conv_1 + conv_lay_1['bias'])

conv_2 = tf.nn.conv2d(
    conv_1_op, conv_lay_2['weight'], strides=[1, 2, 2, 1], padding='SAME')
conv_2_op = tf.nn.relu(conv_2 + conv_lay_2['bias'])

# Encoder -> 1 fully connected layer (flatten, project to the bottleneck)
flat_lay = tf.reshape(conv_2_op, [-1, 7 * 7 * conv_2_features])
enc_1 = tf.matmul(flat_lay, fc_enc_lay_1['weight']) + fc_enc_lay_1['bias']
enc_op = tf.nn.relu(enc_1)

# Decoder -> 1 fully connected layer (bottleneck back to 7x7 feature maps)
dec_1 = tf.matmul(enc_op, fc_dec_lay_2['weight']) + fc_dec_lay_2['bias']
dec_flat = tf.nn.relu(dec_1)
dec_op = tf.reshape(dec_flat, [-1, 7, 7, conv_2_features])

# Decoder -> 2 de-convolutional layers (upsample 7x7 -> 14x14 -> 28x28)
dconv_1 = tf.nn.conv2d_transpose(
    dec_op,
    deconv_lay_1['weight'],
    strides=[1, 2, 2, 1],
    output_shape=[
        batch_size, n_image_width // 2, n_image_height // 2, conv_1_features
    ],
    padding='SAME')
dconv_1_op = tf.nn.relu(dconv_1 + deconv_lay_1['bias'])

dconv_2 = tf.nn.conv2d_transpose(
    dconv_1_op,
    deconv_lay_2['weight'],
    strides=[1, 2, 2, 1],
    output_shape=[batch_size, n_image_width, n_image_height, n_channels],
    padding='SAME')
dconv_2_op = tf.nn.relu(dconv_2 + deconv_lay_2['bias'])

# Error and Optimizer

# mean-squared error between the reconstruction and the original image
error = tf.reduce_mean(0.5 * tf.square(dconv_2_op - x_img))

# adam-optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(error)

# Start Session
disp_imgs = []  # alternating real/reconstructed images for plotting

with tf.Session() as sess:
    tf.global_variables_initializer().run()

    print("*********** Train ***********")

    train_examples = len(mnist.train.images)

    # one pass over the training set, batch_size examples at a time
    for i in range(train_examples // batch_size):
        train_batch = mnist.train.next_batch(batch_size)
        _, err = sess.run([optimizer, error], feed_dict={X: train_batch[0]})

        # every 100 batches, report reconstruction error on validation data
        if i % 100 == 0 and i != 0:
            validation_batch = mnist.validation.next_batch(batch_size)
            err = sess.run(error, feed_dict={X: validation_batch[0]})
            print("Batch: %d validation-error = %f" % (i, err))

    print("*********** Test ***********")
    # NOTE(review): test error is computed on a single batch of batch_size
    # images, not the full test set (the graph's fixed batch size forbids it).
    test_batch = mnist.test.next_batch(batch_size)
    reconstructed_imgs, err = sess.run(
        [dconv_2_op, error], feed_dict={X: test_batch[0]})

    print("Test Error: %.8f" % err)

    # interleave originals and reconstructions for side-by-side plotting
    for i in range(images_to_plot):
        disp_imgs.append(test_batch[0][i].reshape(-1, 28, 28))
        disp_imgs.append(reconstructed_imgs[i].reshape(-1, 28, 28))

# plot output
plot_images(disp_imgs, "Auto-encoder", images_to_plot)
plt.show()
197 |
--------------------------------------------------------------------------------
/Deconvolutional-Autoencoder/tf_dcae_mnist_reconstruction.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# De-convolutional Autoencoder\n",
8 | "###### MNIST reconstruction"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "Import dependencies"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 1,
21 | "metadata": {
22 | "collapsed": true
23 | },
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import tensorflow as tf\n",
28 | "from tensorflow.examples.tutorials.mnist import input_data\n",
29 | "import matplotlib.pyplot as plt\n",
30 | "%matplotlib inline"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {},
36 | "source": [
37 | "### Util functions"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
 44 |     "function to plot the images during the testing phase"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 2,
50 | "metadata": {
51 | "collapsed": true
52 | },
53 | "outputs": [],
54 | "source": [
55 | "def plot_images(images, title, no_i_x, no_i_y=2):\n",
56 | " fig = plt.figure(figsize=(5, 15))\n",
57 | " fig.canvas.set_window_title(title)\n",
58 | " images = np.array(images).reshape(-1, 28, 28)\n",
59 | " for i in range(no_i_x):\n",
60 | " for j in range(no_i_y):\n",
61 | " ax = fig.add_subplot(no_i_x, no_i_y, no_i_x * j + (i + 1))\n",
62 | " ax.matshow(images[no_i_x * j + i], cmap=\"gray\")\n",
63 | " plt.xticks(np.array([]))\n",
64 | " plt.yticks(np.array([]))\n",
65 | "\n",
66 | " if j == 0 and i == 0:\n",
67 | " ax.set_title(\"Real\")\n",
68 | " elif j == 0 and i == 1:\n",
69 | " ax.set_title(\"Reconstructed\")"
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "metadata": {},
75 | "source": [
 76 |     "load the MNIST dataset from tensorflow.examples"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": 3,
82 | "metadata": {},
83 | "outputs": [
84 | {
85 | "name": "stdout",
86 | "output_type": "stream",
87 | "text": [
88 | "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
89 | "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
90 | "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
91 | "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
92 | ]
93 | }
94 | ],
95 | "source": [
96 | "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)"
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | "### Neural Network Model\n",
104 |     "Encoder = 2 convolutional layers + 1 fully connected layer<br>\n",
105 |     "Decoder = 2 de-convolutional layers + 1 fully connected layer<br>"
106 | ]
107 | },
108 | {
109 | "cell_type": "markdown",
110 | "metadata": {},
111 | "source": [
112 |     "<br>Hyper-parameters"
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": 4,
118 | "metadata": {
119 | "collapsed": true
120 | },
121 | "outputs": [],
122 | "source": [
123 | "n_image_width = 28\n",
124 | "n_image_height = 28\n",
125 | "n_input_pixels = n_image_height * n_image_width\n",
126 | "filter_width = 5\n",
127 | "filter_height = 5\n",
128 | "n_channels = 1 # black\n",
129 | "\n",
130 | "conv_1_features = 16\n",
131 | "conv_2_features = 32\n",
132 | "\n",
133 | "final_hid_lay = 196\n",
134 | "\n",
135 | "learning_rate = 0.001\n",
136 | "batch_size = 20\n",
137 | "\n",
138 | "images_to_plot = 10"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {},
144 | "source": [
145 | "input placeholder"
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": 5,
151 | "metadata": {
152 | "collapsed": true
153 | },
154 | "outputs": [],
155 | "source": [
156 | "X = tf.placeholder(tf.float32, [None, n_input_pixels])"
157 | ]
158 | },
159 | {
160 | "cell_type": "markdown",
161 | "metadata": {},
162 | "source": [
163 | "Layer Weights and biases"
164 | ]
165 | },
166 | {
167 | "cell_type": "code",
168 | "execution_count": 6,
169 | "metadata": {
170 | "collapsed": true
171 | },
172 | "outputs": [],
173 | "source": [
174 | "conv_lay_1 = {\n",
175 | " 'weight': tf.Variable(tf.random_normal(\n",
176 | " [filter_height, filter_width, n_channels, conv_1_features], stddev=0.1)),\n",
177 | " 'bias': tf.Variable(tf.random_normal([conv_1_features], stddev=0.1))\n",
178 | "}\n",
179 | "conv_lay_2 = {\n",
180 | " 'weight': tf.Variable(tf.random_normal(\n",
181 | " [filter_height, filter_width, conv_1_features, conv_2_features], stddev=0.1)),\n",
182 | " 'bias': tf.Variable(tf.random_normal([conv_2_features], stddev=0.1))\n",
183 | "}\n",
184 | "fc_enc_lay_1 = {\n",
185 | " 'weight': tf.Variable(tf.random_normal(\n",
186 | " [7 * 7 * conv_2_features, final_hid_lay], stddev=0.1)),\n",
187 | " 'bias': tf.Variable(tf.random_normal([final_hid_lay], stddev=0.1))\n",
188 | "}\n",
189 | "\n",
190 | "deconv_lay_1 = {\n",
191 | " 'weight': tf.Variable(tf.random_normal(\n",
192 | " [filter_height, filter_width, conv_1_features, conv_2_features], stddev=0.1)),\n",
193 | " 'bias': tf.Variable(tf.random_normal([conv_1_features], stddev=0.1))\n",
194 | "}\n",
195 | "deconv_lay_2 = {\n",
196 | " 'weight': tf.Variable(tf.random_normal(\n",
197 | " [filter_height, filter_width, n_channels, conv_1_features], stddev=0.1)),\n",
198 | " 'bias': tf.Variable(tf.random_normal([n_channels], stddev=0.1))\n",
199 | "}\n",
200 | "fc_dec_lay_2 = {\n",
201 | " 'weight': tf.Variable(tf.random_normal(\n",
202 | " [final_hid_lay, 7 * 7 * conv_2_features], stddev=0.1)),\n",
203 | " 'bias': tf.Variable(tf.random_normal([7 * 7 * conv_2_features], stddev=0.1))\n",
204 | "}"
205 | ]
206 | },
207 | {
208 | "cell_type": "markdown",
209 | "metadata": {},
210 | "source": [
211 |     "Encoder -> 2 convolutional layers (without max-pooling)"
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "execution_count": 7,
217 | "metadata": {
218 | "collapsed": true
219 | },
220 | "outputs": [],
221 | "source": [
222 | "# Resize image to proper shape\n",
223 | "x_img = tf.reshape(X, [-1, n_image_width, n_image_height,\n",
224 | " n_channels]) # [batch, height, width, channels]\n",
225 | "\n",
226 | "conv_1 = tf.nn.conv2d(x_img, conv_lay_1['weight'], strides=[1, 2, 2, 1], padding='SAME')\n",
227 | "conv_1_op = tf.nn.relu(conv_1 + conv_lay_1['bias'])\n",
228 | "\n",
229 | "conv_2 = tf.nn.conv2d(conv_1_op, conv_lay_2['weight'], strides=[1, 2, 2, 1], padding='SAME')\n",
230 | "conv_2_op = tf.nn.relu(conv_2 + conv_lay_2['bias'])"
231 | ]
232 | },
233 | {
234 | "cell_type": "markdown",
235 | "metadata": {},
236 | "source": [
237 | "Encoder -> 1 fully connected layer"
238 | ]
239 | },
240 | {
241 | "cell_type": "code",
242 | "execution_count": 8,
243 | "metadata": {
244 | "collapsed": true
245 | },
246 | "outputs": [],
247 | "source": [
248 | "flat_lay = tf.reshape(conv_2_op, [-1, 7 * 7 * conv_2_features])\n",
249 | "enc_1 = tf.matmul(flat_lay, fc_enc_lay_1['weight']) + fc_enc_lay_1['bias']\n",
250 | "enc_op = tf.nn.relu(enc_1)"
251 | ]
252 | },
253 | {
254 | "cell_type": "markdown",
255 | "metadata": {},
256 | "source": [
257 | "Decoder -> 1 fully connected layer"
258 | ]
259 | },
260 | {
261 | "cell_type": "code",
262 | "execution_count": 9,
263 | "metadata": {
264 | "collapsed": true
265 | },
266 | "outputs": [],
267 | "source": [
268 | "dec_1 = tf.matmul(enc_op, fc_dec_lay_2['weight']) + fc_dec_lay_2['bias']\n",
269 | "dec_flat = tf.nn.relu(dec_1)\n",
270 | "dec_op = tf.reshape(dec_flat, [-1, 7, 7, conv_2_features])"
271 | ]
272 | },
273 | {
274 | "cell_type": "markdown",
275 | "metadata": {},
276 | "source": [
277 |     "Decoder -> 2 de-convolutional layers"
278 | ]
279 | },
280 | {
281 | "cell_type": "code",
282 | "execution_count": 10,
283 | "metadata": {
284 | "collapsed": true
285 | },
286 | "outputs": [],
287 | "source": [
288 | "dconv_1 = tf.nn.conv2d_transpose(dec_op, deconv_lay_1['weight'], strides=[1, 2, 2, 1],\n",
289 | " output_shape=[\n",
290 | " batch_size, n_image_width // 2, n_image_height // 2, conv_1_features\n",
291 | " ],\n",
292 | " padding='SAME')\n",
293 | "dconv_1_op = tf.nn.relu(dconv_1 + deconv_lay_1['bias'])\n",
294 | "\n",
295 | "dconv_2 = tf.nn.conv2d_transpose(dconv_1_op, deconv_lay_2['weight'], strides=[1, 2, 2, 1],\n",
296 | " output_shape=[\n",
297 | " batch_size, n_image_width, n_image_height, n_channels\n",
298 | " ],\n",
299 | " padding='SAME')\n",
300 | "dconv_2_op = tf.nn.relu(dconv_2 + deconv_lay_2['bias'])"
301 | ]
302 | },
303 | {
304 | "cell_type": "markdown",
305 | "metadata": {},
306 | "source": [
307 | "#### Error and Optimizer"
308 | ]
309 | },
310 | {
311 | "cell_type": "code",
312 | "execution_count": 11,
313 | "metadata": {
314 | "collapsed": true
315 | },
316 | "outputs": [],
317 | "source": [
318 | "# mean-squared error\n",
319 | "error = tf.reduce_mean(0.5 * tf.square(dconv_2_op - x_img))\n",
320 | "\n",
321 | "# adam-optimizer\n",
322 | "optimizer = tf.train.AdamOptimizer(learning_rate).minimize(error)"
323 | ]
324 | },
325 | {
326 | "cell_type": "markdown",
327 | "metadata": {},
328 | "source": [
329 | "#### Start Session"
330 | ]
331 | },
332 | {
333 | "cell_type": "code",
334 | "execution_count": 12,
335 | "metadata": {},
336 | "outputs": [
337 | {
338 | "name": "stdout",
339 | "output_type": "stream",
340 | "text": [
341 | "*********** Train ***********\n",
342 | "Batch: 100 validation-error = 0.018546\n",
343 | "Batch: 200 validation-error = 0.012491\n",
344 | "Batch: 300 validation-error = 0.008346\n",
345 | "Batch: 400 validation-error = 0.007418\n",
346 | "Batch: 500 validation-error = 0.006778\n",
347 | "Batch: 600 validation-error = 0.005017\n",
348 | "Batch: 700 validation-error = 0.005313\n",
349 | "Batch: 800 validation-error = 0.004810\n",
350 | "Batch: 900 validation-error = 0.004359\n",
351 | "Batch: 1000 validation-error = 0.004376\n",
352 | "Batch: 1100 validation-error = 0.003287\n",
353 | "Batch: 1200 validation-error = 0.003823\n",
354 | "Batch: 1300 validation-error = 0.003952\n",
355 | "Batch: 1400 validation-error = 0.002933\n",
356 | "Batch: 1500 validation-error = 0.003407\n",
357 | "Batch: 1600 validation-error = 0.002925\n",
358 | "Batch: 1700 validation-error = 0.002912\n",
359 | "Batch: 1800 validation-error = 0.002394\n",
360 | "Batch: 1900 validation-error = 0.002522\n",
361 | "Batch: 2000 validation-error = 0.002918\n",
362 | "Batch: 2100 validation-error = 0.002519\n",
363 | "Batch: 2200 validation-error = 0.002560\n",
364 | "Batch: 2300 validation-error = 0.002642\n",
365 | "Batch: 2400 validation-error = 0.002611\n",
366 | "Batch: 2500 validation-error = 0.002577\n",
367 | "Batch: 2600 validation-error = 0.002492\n",
368 | "Batch: 2700 validation-error = 0.002688\n",
369 | "*********** Test ***********\n",
370 | "Test Error: 0.00254873\n"
371 | ]
372 | }
373 | ],
374 | "source": [
375 | "disp_imgs = []\n",
376 | " \n",
377 | "with tf.Session() as sess:\n",
378 | " tf.global_variables_initializer().run()\n",
379 | "\n",
380 | " print(\"*********** Train ***********\")\n",
381 | "\n",
382 | " train_examples = len(mnist.train.images)\n",
383 | "\n",
384 | " for i in range(train_examples // batch_size):\n",
385 | " train_batch = mnist.train.next_batch(batch_size)\n",
386 | " _, err = sess.run([optimizer, error], feed_dict={X: train_batch[0]})\n",
387 | "\n",
388 | " if i % 100 == 0 and i != 0:\n",
389 | " validation_batch = mnist.validation.next_batch(batch_size)\n",
390 | " err = sess.run(error, feed_dict={X: validation_batch[0]})\n",
391 | " print(\"Batch: %d validation-error = %f\" % (i, err))\n",
392 | "\n",
393 | " print(\"*********** Test ***********\")\n",
394 | " test_batch = mnist.test.next_batch(batch_size)\n",
395 | " reconstructed_imgs, err = sess.run(\n",
396 | " [dconv_2_op, error], feed_dict={X: test_batch[0]})\n",
397 | " \n",
398 | " print(\"Test Error: %.8f\" % err)\n",
399 | " \n",
400 | " for i in range(images_to_plot):\n",
401 | " disp_imgs.append(test_batch[0][i].reshape(-1, 28, 28))\n",
402 | " disp_imgs.append(reconstructed_imgs[i].reshape(-1, 28, 28))"
403 | ]
404 | },
405 | {
406 | "cell_type": "markdown",
407 | "metadata": {},
408 | "source": [
409 | "#### plot output"
410 | ]
411 | },
412 | {
413 | "cell_type": "code",
414 | "execution_count": 13,
415 | "metadata": {},
416 | "outputs": [
417 | {
418 | "data": {
419 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPgAAANUCAYAAACe5+ADAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xe8VMX5x/HPAekgvUgXkCYiIiAKUtQogqgIBH+iwR6M\n2GvsBTURoyaaoNHEggW7IgpRiaiIIKCA0gwoTelNerns7497Z3bWXeDuvWfb+H2/XnkxPnfZnRvO\nc5+5c+bMBJFIBBHxU4lMd0BEUkcJLuIxJbiIx5TgIh5Tgot4TAku4jGvEzwIgolBEFyc6X6IhCkI\ngh5BECwvzGuzIsGDIFgcBMH2IAi2BEGwMgiCZ4MgqJjpfkm4cuXfOQiCu4IgeCGF7/9sEATDU/X+\nrqxI8AJ9I5FIRaAdcBTwxwz3R1Ij5/+dg3zZlDv7lHWdjEQiK4H/kH8BEARBmSAIHgqCYGkQBKuC\nIHgiCIJyBV+rGgTB2CAI1gRBsKGgXT+T/ZfCSebfueDrZwRBMDMIgp+DIFgUBEGvgnjdIAjGBEGw\nPgiChUEQXOL8nbuCIHg1CILngyDYHATBnCAIOjhfvykIgh8LvrYgCIITC973FmBQwUhjVsFrJwZB\ncF8QBJ8D24AmBSOSk37xeS84/901CILJQRBsDIJgWRAE5wdBcCkwGLix4P3fdb6PNwqu5R+CILjS\neZ9yBVV/QxAEc4GOhf3/OesSvCBBTwUWFoT+BDQn/0JoBtQD7ij4WgngGaAR0BDYDjyezv5K0STz\n7xwEQSfgeeAGoArQDVhc8PdGA8uBusAA4P4gCE5wPur0gtdUAcZQcH0EQdACGAZ0jEQilYBTgMWR\nSGQ8cD/wSiQSqRiJRI503us84FKgErDkAN9fI2Ac8BhQs+D7mhmJRP4JvAg8WPD+fQtGA+8Cswq+\n7xOBq4MgOKXg7e4Emhb87xRgyP4+O0YkEsn4/8j/x9oCbAYiwISCf5AA2Ao0dV57LPDDPt6nHbDB\n+e+JwMWZ/v70v+L9OwNPAo8keL8GQB5QyYk9ADxb0L4L+Mj5Wmtge0G7GbAaOAko9Yv3vQt44Rex\nicA9Cb6fkxL9PfJ/9XhrH/8/PAsMd/77GGDpL17zR+CZgvb3QC/na5cCywvz//lBhfsxkBZnRiKR\nj4Ig6A68BNQASgPlgRlBEJjXBUBJgCAIygOPAL2AqgVfrxQEQclIJJKXzs5LoSX970x+Ir+f4L3q\nAusjkchmJ7YE6OD890qnvQ0oGwTBQZFIZGEQBFeTn5SHB0HwH+DaSCTy0376vqww36DT50WFfG0j\noG4QBBudWEngs4J23V989n5HD66sG6JHIpFPyP8J9xCwlvxh9+GRSKRKwf8qR/InaQCuA1oAx0Qi\nkYPJH7pB/sUhWSzJf+dl5A9Pf+knoFoQBJWcWEPgx0L24aVIJNKV/ASLAH82X9rXX/nFf28l/weT\nUcdp76vPid5nGfmjlSrO/ypFIpHeBV9fQf4PDKPhPt43TtYleIFHgd8ARwBPAY8EQVALIAiCes7v\nJpXIvzA2BkFQjfzfVSR3FPbf+V/ABQWTYCUKvtYyEoksAyYDDwRBUDYIgrbARcABb3EFQdAiCIIT\ngiAoA+wg/zraW/DlVUDjQsyUzwTODoKgVMHk3QDnay8CJwVB8NsgCA4KgqB6EATtnPdv4rz2S2Bz\nwaRfuSAISgZB0CYIAjOZ9irwx4JJ5frAFQf6/oysTPBIJLKG/EmVO4CbyJ+ImRIEwc/AR+RXbci/\nQMqRXwGmAOPT31spqsL+O0cikS+BC8j/dWwT8An5VRfg/4DG5Ffzt4A7I5HIR4X4+DLkT+ytJX8Y\nX4voLbvXCv5cFwTBV/t5j9vJr9IbgLvJ/5XDfG9Lgd7kjzLXk//DwEzY/QtoXTC7/nbBr5OnkT+H\n9ENBn54GKhe8/m7yh+U/AB8Aowrx/Q
EQFPzSLiIeysoKLiLhUIKLeEwJLuIxJbiIx5TgIh5Tgot4\nTAku4jEluIjHknrYJAgCrYpJIBKJaO17CHR9JVac60sVXMRjSnARjynBRTymBBfxmBJcxGNKcBGP\nKcFFPKYEF/GYElzEY0pwEY8pwUU8pgQX8ZgSXMRjSnARjynBRTyWtsMHBwzIP9Xlkkvs8c389FP+\nOW87duywsRdffBGAlSujZ8YtXLgQkf259NJLAejUqZONlSuXf7z4zz//bGPfffcdAMuWRc/ymzRp\nEhB7ze1PhQoVbLt06dIA7Nq1y8a2bt2aVN9TSRVcxGNKcBGPJXU2WXG21Pn+++8BaNy4caFev3lz\n9MjnOXPmFPVjE1q+fDkADz74oI1Nnz69yO+nLZvCkez1VaVKFdt+7LHHAOjcubONlS+ff7Lv2rVr\nbSwvL//Y+IoVK9rYunXrAJg/f76Nbdq0CYAmTaKHgB50UP5vtKVKlbKxEiXya6Q7RJ82bRoATz75\npI39+GOhTjROSFs2iUhCaZtkM5Nrbdu2tbF58+YB0KpVKxtr3749AD169LAx81PZnRhp0MA9Dz3W\nnj17bHvNmjUAHHLIIXGvW7p0qW0Xp4JLZmzcuNG2//znPwNw4okn2lgQ5Be+ypUr21j9+vUBaNSo\nkY2ZkUDt2rVtrEWL/BOq3Qpurj/3OixZsiQAhx12mI3Vq1cPiJ0cfv7555P4zsKjCi7iMSW4iMfS\nNkSfMGFCzJ+u8ePHx8WqVq1q2+3atQNgxowZNtaxY8d9fpZ7X93c9zS/DgBUq1YNgEWLFhWq75L9\nvv3225g/D8SdKDND9NatW9uYGcqvX7/exmbPng1EJ+UATj75ZABuuOEGGzNDfTNRl0mq4CIeS9tt\nskzp378/AK+++qqNmZ/yPXv2tDH3J3WydJssHLl4ffXq1QuAe++918ZMhTdfKy7dJhORhJTgIh5L\n2yRbOtWqVcu2//GPfwDRFUcA99xzD1C8YbkIwMCBA4HYe+ivvPJKproTRxVcxGNeVvDLL7/ctmvW\nrAnAhg0bbGzBggVp75P443e/+51t9+3bF4hdx/7QQw+lvU/7ogou4jEluIjHvBqid+nSBYCbb745\n7mtnnnmmbRd2tZOIy6yoHDZsmI2ZB1rMTkTZRhVcxGNeVfDevXsDseuMzdr3L774IiN9En8MHToU\ngObNm9vYf/7zHyB2c4dsogou4jEluIjHcn6IbrbGhejifnd/rDvvvBOA3bt3p7dj4gWzwxDA0Ucf\nDcTu1GJ2kslWquAiHsv5Cu4+aH/UUUcBsRtITJ48Oe19En8MGTLEts1ea2+88YaNffXVV2nvUzJU\nwUU8pgQX8VjODtH79OkDwO23325j5gwq8zioSFF169YNgLPOOsvGzPqKuXPnZqRPRaEKLuKxnKrg\n1atXt+2//e1vQHTjeYD3338fgClTpqS3Y+IdM3lrdlcFGD16NAAjR47MSJ+KQhVcxGNKcBGP5cQQ\n3QzD3fvbhx56KBB7eIE74SaSrKuvvtq2zcEaU6dOtbH77rsv7X0qLlVwEY/lRAVv2rQpEF0L7Lr2\n2mttW0cRSVGYjUIuuOACG9uyZQsAI0aMsLFc3ChEFVzEY0pwEY9l7RDdPaD9gw8+iPu6uU85duzY\ntPVJ/GFOmAUYPnw4AHXq1LExs8ea+2BJLlIFF/FY1lbwSy+91LYbNmwY9/VPPvkEgGRORxUx3MdA\nGzRoAMCcOXNsLBdviSWiCi7iMSW4iMeybojetWtXAK644ooM90R8ZCbSevbsaWNmpeS0adNsbN26\ndentWIqogot4LOsq+PHHHw9AxYoV477mrlQzK41EktGvXz8AOnXqZGOVK1cGYPPmzRnpUyqpgot4\nTAku4rGsG6InMmvWLABOPPFEG1u/fn2muiM5zAzHd+zYYWNmj7VHHnkkI31KJVVwEY8FyawEC4JA\ny8
YSiEQiQab74IN0XF8tWrQAYvf3M5O3q1atSvXHF0lxri9VcBGPKcFFPJbsEH0NsCR13clJjSKR\nSM1Md8IHur4SKtb1lVSCi0hu0RBdxGNKcBGPKcFFPKYEF/GYElzEY0pwEY8pwUU8pgQX8ZgSXMRj\nSnARjynBRTymBBfxWFJbNmnDh8S04UM4dH0lpg0fRCQhJbiIx5TgIh7LiW2TRdKtUqVKAGzbts3G\n8vLyMtWdIlMFF/FY1lXw66+/HoBy5crZWNu2bQEYMGBA3OtHjhxp21988QUAo0aNSmUXJYeZ02tr\n1oxuc9akSRMAWrZsaWO1a9cGYPny5Tb2wgsvADB58uSU9zMsquAiHlOCi3gsK042eeWVV2w70TC8\nsMwJFSeddJKNLV26tOgdKyQtdAlH2NeXmSi7/fbbbaxz585A9IwyiE6elSpVysbKlCkDwNatW23M\nXF9PP/20jY0fPz7MLiekhS4iklBGJ9lM5T5Q1Z4/fz4A//nPf2zMTIz07dvXxpo2bQrA4MGDbeyB\nBx4Ip7OSc+6++24g9nooX748AAsXLrSxr7/+GoBNmzbZWIMGDYDYM8xM7IgjjrCxdFTw4lAFF/GY\nElzEY2kfonfo0MG2+/XrF/f1OXPmAHD66afb2Nq1awHYsmWLjZUuXRqAKVOm2NiRRx4JxA6r5Ner\nY8eOAFSrVs3GzPXy+OOP29jYsWOB2LUXZ5xxBhA7vK9RowYQXZcB0V8LzQRctlEFF/FY2iv4IYcc\nYttBkD/7b6o2wCmnnALAihUr9vs+1113HQCtW7eO+9p7771X7H5KbnKrq1lHPmHCBBu75ZZbAPjq\nq6/i/q57S2zBggUA7Nixw8ZKlMivhz/99JONZWvlNlTBRTymBBfxWNqH6O+++65tN2vWDIDNmzfb\n2Pr16wv1PmeffTYQu/pIZPbs2bZ97733ArBy5Uobc+9/74958KRWrVo2tnHjRgAmTpxY3G6mjSq4\niMcyupJtyZIlSb3+hhtusO3mzZvHfX3q1Kkxf8qv26RJk5J6vVmnDtFbuO6ts//+978AjBs3LoTe\npYcquIjHlOAiHsu6HV0SOe200wC45557bMysZFu9erWN/fGPfwRi99ESORCzAtLcIwc4/PDDAZgx\nY4aNvfjii+ntWAhUwUU8lhMV3KxfN1Xb5W4W8cknn6StT5LbqlatatvnnXceELsnm7m19tZbb9mY\n+9xDrlAFF/GYElzEY1k7RH/77bdt++STT477+vPPPw/AbbfdlrY+iT+GDBli22Yr5Q0bNtjYO++8\nA0S3Ss5VquAiHsu6Cm4eJz3uuONszOxwaTZ+ABg+fDgQuwmEyIGYat2nTx8bM+vN3f3VHn300fR2\nLEVUwUU8pgQX8VjWDdHfeOMNIPG+au6ER7bvpCHZ6eqrrwZiD8eYN28eEH2YBGD79u3p7ViKqIKL\neCwrKri7g2r79u3jvm4esL/zzjvT1SXxyOWXX27bZpLNnZw1e/i9/vrr6e1YGqiCi3hMCS7isYwO\n0c1EmvuYXqI91mbOnAnonrckxzyc1KtXLxszk2cff/yxjfn8q58quIjHMlrBzeEF5ogZl7sW3eef\nsJI6l156KQCNGze2MXN71d28wecNQlTBRTymBBfxWEaH6Ndee+0+vzZs2DDb1uSaFIVZrVaxYkUb\n++yzzwCYNWtWRvqUbqrgIh7LipVsibhnOu/evbtQf2fTpk1xrze33SpXrhz3+ipVqtj2/kYTeXl5\ntn3TTTcBfk/M5DJzOi1ED8coX768jdWpUweIfRzZvWVmmGvIvW5+/vlnIPZ6MCeOVqhQwcbMasyG\nDRvamLkO3evG9MvdGdisptu5c+c+v8dkqIKLeEwJLuKxrB2iu6dEFtZrr70GwIoVK2ysdu3aAAwa\nNCiUfpntdO+7775Q3k/C5Z4GumvXrpg/AQ499FAgejotQO/evQGI
RCI2ZobeZjchiJ4u6g7RS5Ys\nCUClSpVszKzQPPjgg21s7969QOxJulu3bgXgqaeesrGwhuaGKriIxzJawd9//30AzjjjjFDeb+DA\ngYV63Z49e4DoT1XXmDFjbHv69OlxXze3WSQ7mc0bIHp6rfn3BgiCAIieTQ/Rau1Oxpl17Dt27LAx\nU13Ne0C0IruV15xI6k68LV++HIBly5bZ2OjRowGYMGFCYb+9pKmCi3hMCS7iscCdWDjgi4Og8C9O\nwo033mjbiR4XNcyJj7D/SbN///vftr148eK4r5t93+bPn59MN/cpEokEB36VHEjY11eLFi0AaN26\ntY2Z++DupJhZ6ebetzb3v9etW2dj5j64O+Q397DNMN9tmwlZgK+//hoo2gq64lxfquAiHsuKCp7r\nVMHDoesrMVVwEUlICS7iMSW4iMeU4CIeU4KLeEwJLuIxJbiIx5TgIh5Tgot4TAku4jEluIjHlOAi\nHkt2R5e1wJJUdCSHNcp0Bzyi6ytesa6vpJ4mE5HcoiG6iMeU4CIeU4KLeEwJLuIxJbiIx5TgIh5T\ngot4TAku4jEluIjHlOAiHlOCi3hMCS7isaSeJtPRMonp6KJw6PpKTEcXiUhCSnARjynBRTymBBfx\nmBJcxGNKcBGPKcFFPKYEF/FYstsmZ7XmzZsDMH/+fBu76qqrAHjssccy0ifxxzHHHAPAnXfeaWNf\nf/01ALfeemtG+nQgquAiHvOqgh911FEA7N2718aWL1+eqe6IZwYNGgRAixYtbOzLL7/MVHcKRRVc\nxGNKcBGPeTVEb9euHQBbt261sbfeeitT3RHPHHLIIQCsW7fOxj777LNMdadQVMFFPJbzFbxNmza2\nPWzYMABGjRqVqe6IZ+rWrWvbNWvWBGDatGk2NmHChLT3KRmq4CIeU4KLeCznh+gtW7a07QoVKgDw\nyiuvZKo74pmzzjrLts0k28iRIzPVnaSpgot4LIhECr/PXTZuiueuJDKTIO7Em3vLLFW06WI4svH6\nGjdunG0feuihAHTq1MnGfv7555T3QZsuikhCSnARj+XsJFvjxo0B6NChg4199913QHqG5eK3evXq\nAdCqVSsbW7ZsGZCeYXlYVMFFPJazFbx79+5xsTVr1mSgJ+Kj3r17A1C2bFkbmzlzZqa6U2Sq4CIe\nU4KLeCxnh+hHHHFEXOzBBx/MQE/ER507dwZg586dNvb5559nqjtFpgou4rGcWslmfqoCvPfeewAs\nXrzYxrp06QLAjh070tovrWQLR6avrzp16ti2ub5Wr15tY6eeemra+wRaySYi+6AEF/FYTk2ynXTS\nSbZdrVo1AMaPH29j6R6ai18GDhxo21WrVgVg4sSJGepNOFTBRTyWUxX8yCOPtG0zOfj6669nqjvi\nmV69etl2uXLlAJg6dWqmuhMKVXARjynBRTyWE/fBzf1Jd7H/hg0bgNjH+TJF98HDkanry+wA9Oab\nb9qYmbDt2bOnjbkHHqST7oOLSEI5Mcl2/vnnA1CrVi0bc/fKEimO9u3bA7Hrzr/55hsgc1U7LKrg\nIh5Tgot4LCeG6I0aNYqLmUk2keIyE7Xbtm2zsblz52aqO6FSBRfxWE5U8NNOOy0u9u6772agJ+Ij\nc5usRIlovfv+++8z1Z1QqYKLeEwJLuKxrB2id+3a1bbdnTZEwvDb3/7Wtjt27AjAypUrbex///tf\n2vuUCqrgIh7L2grer18/2y5ZsiQAX3/9tY19+umnae+T+MPd3KF27doAzJ4928amT5+e9j6lgiq4\niMeU4CIey7ohevny5YHo2VAud/eWvLy8tPVJ/FO9enXb3rRpExB9wMQnquAiHsu6Cr57924gdq35\nmDFjAPjrX/+akT6Jf9zdeBcuXAjAiBEjMtWdlFEFF/GYElzEYzmxJ1u2055s4dD1lZj2ZBORhJTg\nIh5Tgot4TAku4jEluIjHlOAi
HlOCi3hMCS7iMSW4iMeSfdhkLbAkFR3JYfGnMkhR6fqKV6zrK6ml\nqiKSWzREF/GYElzEY0pwEY8pwUU8pgQX8ZgSXMRjSnARjynBRTymBBfxmBJcxGNKcBGPKcFFPJbU\n02Tatzox7YseDl1fiWlfdBFJSAku4jEluIjHlOAiHlOCi3hMCS7iMSW4iMeU4CIeS3bb5IyoUKEC\nACNGjLCx3//+9wDMmDHDxgYOHAjAkiXaeVeSd/3119v2cccdB8D//vc/G7vpppvS3qfiUgUX8VhS\n+6Jnailhs2bNAJg3b17c10qUiP6MuvLKKwH4+9//np6OFdBS1XCk8/oqWbKkbQ8fPhyIjgABatWq\nBcB3331nY0899RQATz75ZDq6aGmpqogkpAQX8VjWTrLVrFnTtp977rkM9kR85E6odenSBYCDDoqm\nw+bNmwGoUaNG3Ovcid3p06entJ/FpQou4rGsq+BmouzMM8+0sU6dOhXq73br1g2InXibNWsWAJ9+\n+mlYXZQcdtpppwGx11SZMmUAWLdunY2VLl0agPLly9tYq1atALjwwgttrEGDBgC89dZbKepx8aiC\ni3hMCS7isay7D56XlwfA3r17C/V6dzie6O+YVW2DBg2yMXeSJAy6Dx6OVF1fzZs3t+1bb70VgJYt\nW9qYGYbv2rXLxsx1VbFiRRsrVaoUAOvXr7exZcuWAfDoo4/a2Mcffxxa30H3wUVkH7Jiku3999+3\nbbciF4Y7MbJlyxYAGjVqZGOHHnooAF9++aWNuauYxH9DhgyxbTNR5t6GNZNs5tYYRK+llStX2tie\nPXuA2KreunVrAG688UYbW7NmDQDffvttON9AMaiCi3hMCS7isYwO0bt37w5AixYtbMxMlB1oku2J\nJ54A4IMPPrCxTZs2AXDCCSfYmJlUcV122WUAjBw5sijdlhxhHh7p0KGDjdWpUweIPoIMsG3bNiB2\nkm3mzJkATJw40cbMcL1du3Y2dtZZZ8V9xm233QbA2WefXfxvophUwUU8lvYK3rhxY9sePXo0ELve\nNxFzq+uNN96wsbvvvhuI/vRN9HqASy+9FIidVHnwwQcBKFu2rI09/vjjAOzevfvA34TkhOOPPx6I\nnXQ1VXr58uU2tmjRIgDmzp1rY4899hgQnWxzffjhh7ZtbsG5FdxsFtG5c2cbmzJlShG/i+JRBRfx\nmBJcxGNpH6K7j+Ttb2j+ySef2LaZrFi7dm2hPsMdoj/wwAMAPPzwwzZmVi6ZoTrAmDFjgOhwTXJT\n9erVbdvsBOSurTBD888//9zGzESte80VlrluevToYWMHH3wwAL169bIxDdFFJHRZsZLNZR6gdx/J\nK2zlTsT8hB08eLCNdezYscjvJ9nNncQ1t8I2bNhgYwsWLABiJ2y/+uqrIn/eRx99BMSulDSPorrr\n3c069nRP4qqCi3hMCS7isYwO0RM9WHLMMceE+hlBEMR9VqLPveuuuwA477zzQv18SS+zUg2iD4WY\nh0QgepBBcYblru3btwOxK+MqV64MRB90guhDKWaHoXRRBRfxWNor+NChQ227sJs6FEffvn0BOOqo\no+I+1/18U8Elt7mbO5h91dzbpu4qtDCY9e7mMVSAnTt3ArBixQobS3flNlTBRTymBBfxWNqH6GbI\nnArmgRIzoQFwyy237PP1ZucN0EMmvnCH6GaSLeyh8qmnnmrbl1xyCQANGza0MbNuw10tlymq4CIe\ny7qVbMVhNne4/PLL9/u6xYsXA7F7dS1dujRl/ZL0cSupeRzY3MoqinLlytm2mSB2R6Ht27eP+SyA\njRs3ArGbkWSKKriIx5TgIh7L+SG6u+Wyu7fb/pidOyZNmpSSPknmVKpUybbNJJt7XZhdVhI9vukO\ns81EWu/evW2sbdu2QPQ8MohuuWz2A4TodsmZuvftUgUX8VjaK7hZGw6J14S7tyCMf/7znwDUrV
s3\n7msHOrookVTeqpPMcndGNcdy1atXz8bMHn3uZgxmHbl7fdWqVQtIfECCe0vVTM66x2Ht79ZsuqmC\ni3hMCS7isbQP0d3DBtw90YyxY8cCiYfbBxqC7+/r5qAE8du8efNs20yuuY9tmh1fduzYYWNmt5Wt\nW7famBmGu79SmsdO3fPw5syZA8Bf/vIXGzPrLLKBKriIx9J+Pri7Cf0XX3wBxE5kmEmzopwPvmrV\nKiD2p7iZVHHXIyc6LKE4dD54OMK4vrp27Wrbt99+OxB7m6xq1apA7CYQZrWaGzPc/QDN0UWmagM8\n99xzQGpvuep8cBFJSAku4rG0D9Fd3bp1A+DMM8+0sauuugoo2hD9yiuvBODvf/97WF0sFA3RwxH2\n9XXyyScDcNppp9mY2dnHHE4A0ZVp7io4M+H2zTff2JhZ/fbiiy/amDmFNJU0RBeRhDJawRMxK4zM\n5BhEV56ZQwwgurrNvY1h1pin+9FPVfBwpOP66tmzJwBdunSxsaOPPhqIPUnU7Lo6btw4G5s/f36q\nu5eQKriIJKQEF/FY1g3Rc5GG6OHQ9ZWYhugikpASXMRjSnARjynBRTymBBfxmBJcxGNKcBGPKcFF\nPKYEF/FYsnuyrQWWHPBVvy6NDvwSKSRdX/GKdX0ltVRVRHKLhugiHlOCi3hMCS7iMSW4iMeU4CIe\nU4KLeEwJLuIxJbiIx5TgIh5Tgot4TAku4jEluIjHknqaTPtWJ6Z90cOh6ysx7YsuIgkpwUU8pgQX\n8ZgSXMRjSnARjynBRTymBBfxmBJcxGNKcBGPKcFFPJbswQcp1759ewDefPNNG2vcuHGx3/fkk0+2\n7Xnz5gGwbNmyYr+v5KbbbrvNtsePHw/A9OnTi/x+5roFKFGiRLHfLyyq4CIeU4KLeCzrhuinnHIK\nAGXKlAn1ffv27WvbF154IQBnn312qJ8h2e+yyy4DoFOnTjY2duzYYr+v+yvgIYccAmiILiIplhUV\n/KCDot3o3bt3Sj5jxowZtn3ttdcCUKFCBRvbunVrSj5Xsstxxx0HwPLly21s5syZxX5f9xquW7cu\nAOXLl7exbdu2FfszikIVXMRjSnARj2XFEL1nz562feyxxwLw4IMPhvoZVatWte3WrVsDsUMoDdH9\ndeaZZ9q2+bVswoQJoX5G6dKlbbtatWoA7N69O9TPKApVcBGPZbSCt2nTBoCXX37ZxhYtWgTA/fff\nH+pnnXHGGaG+n+SO008/3bZ//vlnAMaNGxfqZ9SrV8+2zYSbKriIpJQSXMRjGR2imwX/7v3oXr16\nAbBly5ZQCVN2AAAgAElEQVRQPsNMeHTv3t3G9u7dG8p7S3a78847ATj88MNt7IknngBg1apVoXyG\nuXbdlXFhvXcYVMFFPJb2Cj5gwADbNqvWFi5caGNhr9+99dZbgdiqPXHiRAA2btwY6mdJ5p166qm2\nbZ5rmDt3ro0988wzoX7eTTfdBEDTpk1tLOwJvOJQBRfxmBJcxGNpH6IPHDjQts1Ksn/84x+hfoa7\nA8zgwYMByMvLs7Hhw4cD2XGfUsLlrneoVKkSAKNHj07Z5yV6OCqMx0/Dogou4rG0VfDKlSsD0Llz\n57ivjRw5MtTPuvTSS227Ro0aQHQfNoCPP/441M+T7NGuXbu42OzZs0P9DHdzB/NoqDs5/Omnn4b6\necWhCi7iMSW4iMfSNkQ3e6y5i/Ldh0zC5N6TNL799tuUfJZkF3dnldWrV8f8GZaTTjrJttesWQPA\nJ598EupnhEUVXMRjaavgmzdvBmL3v2rbti0QXS8OsH79+iJ/Rq1atYDY1XLGpEmTivy+kjt++OEH\n2zajxhNOOMHGPvzwwyK/d79+/QDo2rWrjZnbr+4eb9lEFVzEY0pwEY+lbYi+fft2ILpjC0D//v0B\neO+992zs4YcfLtT7md1gmjRpYmNmBVskEol7vR4R/XVYu3
atbXfo0AGIPeDC7Pnn/ioYBAEQXTMB\nsGHDBiD2AA7zfu5KSfPAUtgTeWFRBRfxWNrXopuH8CH6k7NPnz42VthbZ+YntVut3Z/Av/Tss88m\n003JUe+8845t165dG4i9RsyOuu7Ir1y5ckDs48OLFy8GYMeOHTZmJnHdHXrNyDRTBxsciCq4iMeU\n4CIeCxJNSO3zxUFQ+BcnwX1AoFmzZoX6O6+//npc7LnnngOij4i63BVOYYtEIkHK3vxXJOzry0zE\n7tq1y8YOO+wwIDpUh+j+f2ZYDrBs2TIA6tSpY2ODBg2K+ROi+6+5p9fOnz8/lP4bxbm+VMFFPJYV\nRxe5q9uKc9Lj999/v8+vmZ/moHXpvxaJ/p2/++47IPbWbGHfw+z3lujUUFPxs40quIjHlOAiHsuK\nIXpYzH1186dLw3IpLjM0L1mypI2ZSbZsPZ1WFVzEY15VcHPLL5lbfyKFZVa8mRNKARYsWJCp7hSK\nKriIx5TgIh7zaohetmzZuJh5GECkuMxDJu4QfX9rL7KBKriIx7yq4BdccAEQ+9jfvffem6nuiGfM\n5g/u+d9Tp07NVHcKRRVcxGNKcBGPeTVEnzZtGhC7r5vOIZOwPP300wDs3LnTxiZPnpyp7hSKKriI\nx7Jiw4dcpw0fwqHrKzFt+CAiCSnBRTymBBfxmBJcxGNKcBGPKcFFPKYEF/FYsivZ1gJLUtGRHNYo\n0x3wiK6veMW6vpJa6CIiuUVDdBGPKcFFPKYEF/GYElzEY0pwEY8pwUU8pgQX8ZgSXMRjSnARjynB\nRTymBBfxmBJcxGNJPU2mXS8T066q4dD1lZh2VRWRhJTgIh5Tgot4TAku4jEluIjHlOAiHlOCi3hM\nCS7isWS3TRb51StZsqRt5+XlZbAnB6YKLuIxryp43759ARgzZoyNDRs2DIAnnnjCxrL9p65kpwsu\nuACAiy++2Ma++eYbAIYOHZqRPh2IKriIx5TgIh5L6uiibHzap3r16rY9c+ZMAOrXrx/3uvLly9v2\n9u3bQ+2DniYLRzZeX5UqVbLtsWPHAtC1a1cb+/DDDwHo1atXyvqgp8lEJKGcn2Tr1q2bbSeq3C+/\n/DIAO3bsSFufxB8nn3yybdepUweAbdu22djy5cvT3qdkqIKLeEwJLuKxnB2ilylTBoBbb711v68b\nNWoUADoHXYqiTZs2tl2uXDkA1q1bZ2Nffvll2vuUDFVwEY/lbAU/4ogjADj66KPjvrZnzx7bHjdu\nXNr6JP5o2LAhAMcff7yN1apVC4it2u+88056O5YkVXARjynBRTyWs0P0/v377/NrH3zwQRp7Ij66\n6KKLAOjSpYuNmcdEZ82aZWOrVq1Kb8eSpAou4rGcreDuCjZj165dwIFvnYkciKncZcuWtbF58+YB\nMHLkyIz0qShUwUU8pgQX8VhODdGPO+64hG1j69atQPSxUZFktG3b1rZr1KgBwNq1a23s888/B2Du\n3Lnp7VgxqIKLeCynKnjHjh33+/VcmvyQ7OOuijTrzpcsWWJjEyZMSHufiksVXMRjSnARj+XUEL1D\nhw5xsY0bN9q2huhSHJ07d7Zts4ef+2jowoUL096n4lIFF/FYTlRws4vlOeecE/e1TZs22Xa2748l\n2ally5YAtGvXzsZq1qwJxN5ynT59eno7FgJVcBGPKcFFPJYTQ3RzuEGJEvE/j8zG8yJF1b17dwAa\nN25sY+Zay/bHQQ9EFVzEYzlRwQcMGBAXM7fHnnzyyXR3RzzTrFkzAIIgekLQihUrAJg0aVJG+hQW\nVXARjynBRTyWtUN095yxRPe/zT3vXLw3KdnF3Affu3evjX399dcAPPvss5noUmhUwUU8lrUV3N3Q\nIdHtsbfffjud3RHP9OjRw7abNGkCwO7du21s8uTJ6e5SSqiCi3hMCS7isawdopvVay53f6y//vWv\n6eyOeObEE0+07QYNGg
Cxu7dMnDgx3V1KCVVwEY9lbQU/5ZRT4mJLly61bfcxUZFkmZNCAXbs2AHA\nggULbCzbz/0uLFVwEY8pwUU8lnVD9FKlSgHQtGnTuK+ZoRTE3rMUSVZeXp5tmwMzNm/enKnupIwq\nuIjHsq6Cm/XA7hrzNm3aALm5q6VkJ/eW65YtW2L+9IkquIjHlOAiHsu6IbqZ/Lj11lttLBKJADBj\nxoyM9En888ILL9i22cnlo48+ylR3UkYVXMRjgamOhXpxEBT+xb8ikUgkOPCr5EB0fSVWnOtLFVzE\nY0pwEY8pwUU8pgQX8ZgSXMRjSnARjynBRTyW7Eq2tcCSA77q16VRpjvgEV1f8Yp1fSW10EVEcouG\n6CIeU4KLeEwJLuIxJbiIx5TgIh5Tgot4TAku4jEluIjHlOAiHlOCi3hMCS7isaQeNtGmeIlp08Vw\n6PpKTJsuikhCSnARjynBRTymBBfxmBJcxGNKcBGPKcFFPKYEF/GYElzEY0pwEY8pwUU8luzBBylR\nq1Yt23711VcBmDx5so3985//BGDx4sWhfm7lypVtu1u3bgCMHz/exnbv3h3q50lm1K1b17avvPJK\nAPLy8mzsySefBGDp0qWhfq57XR966KEATJ06NdTPOBBVcBGPKcFFPJbRIXrVqlUBmDNnjo2ZYfOq\nVatsLFVD8xkzZthYzZo1ATj66KNtbOHChaF+rqSX+Xd+6qmnbMwMlb/44gsb27lzZ6if27JlSwBu\nvPFGG1u3bh2gIbqIhCjtFbxGjRq2/corrwBQrVo1G/vHP/4BwBVXXJGyPtx2221A9Kc5wO9//3tA\nVdsnI0aMAKBVq1Y2NmvWLABee+01G3NHi2G49dZbATjssMNsbPTo0aF+RmGpgot4TAku4rGkzgcP\nY8+sk08+2bbHjRsX9/U6deoAsGbNmuJ+VIzDDz/ctr/55hsA3nrrLRs7//zzAdi8eXPS76092cIR\nxvU1aNAg2zb3vFeuXGlj5tdCs94iLNdff71tm1/3vvrqq4T9Spb2ZBORhNI2yWZW9fTv3z/uaxdd\ndJFtp6pyf/TRR3Ffcyt4USq3ZJ/TTz/dts3tL3ekGHblrlChAgAnnHCCjZlbYn/9619D/ayiUAUX\n8ZgSXMRjaRui/+UvfwHg3HPPtTGzksy9Jxm2448/HoDatWvb2LPPPgvACy+8kLLPlfS6+OKLAejU\nqZONffvttwBMnz49ZZ97ww03ANCgQQMbe/fdd4HYB6YyRRVcxGNpq+DmdtzevXtt7KeffgJg165d\noXxGuXLlALjlllts7A9/+EPM5wNceOGFoXyeZA8zUitTpoyN7dmzB4CKFSuG8hnt2rUDYOjQoTb2\nm9/8BoAff/zRxv72t7+F8nlhUAUX8ZgSXMRjGX1ctE+fPgB88MEHNrZx40YARo4cWaj36N69u233\n6NEDgM6dO8e97vXXXy9qNyUH/PDDDwB06NDBxtq2bQtA3759bcyslHR36zGPlVaqVMnGzK+SjRo1\nsrH27dsDcOyxx9qYudf+0ksv2Zi7ci7TVMFFPJa2tehmI4W3337bxty9spzPAGInxQ7QJ9tO9He+\n//57AHr16mVjixYtKtR7F5bWooejONeXqdzDhw+3MVPB3Q1DzERs9erVbcxUYfe6MCsq3ceb69ev\nD8TeEjN/55xzzrGxBQsWFPXbSEhr0UUkISW4iMfSNslmVq2ZYRNE7yu6w2ezMsh96OS5557b5/uO\nGjXKts1uHS6zmijsYblkF7NazaxShOj1dfDBB9uYGZq7+7AtWbIEiK7LAChZsiQARxxxhI2Z4bq7\nvfJnn30GhD8sD4squIjH0r7hQ9iaNGli22Y/tZkzZ9rYKaecAoT/GKpLk2zhyKbr66CD8ge3l112\nmY3169cPiL0N9tBDDwGxmzuETZNsIpKQElzEY1lxNllx3HHHHbZtft246aabbCyVQ3Px
l3lQpVmz\nZjZmHjl2D01I5dA8DKrgIh7L2Qo+cOBAAH73u9/ZmNlXzeyJJVJUp512GhC715o5aut///tfRvpU\nFKrgIh5Tgot4LGeH6KeeempcbOzYsUD2T3xI9hs8eDAAbdq0sTGzWm3+/PkZ6VNRqIKLeCznK/jW\nrVttzOzcKlIUZu06RE+eXbt2rY1NnDgRgClTpqS1X8WhCi7iMSW4iMdyaojubldrVhWtXr3axjS5\nJsXRsWNH2y5fvjwAy5cvt7EVK1akvU/FpQou4rGcreBm3fl7770X9zp3d0yz+sh9SF8kkW7dutm2\nuYbcCm42jnDXp5tHlLOVKriIx5TgIh7LqSF6Inl5ebZtVh9dc801NjZnzhwAhgwZkt6OSc4oXbo0\nAGXLlrUxc8ZZlSpVbOyoo44CYs+2e/jhh4HY++XZRBVcxGM5X8HNudAAF110EQD/+te/bOzee+9N\ne58kt5jTbadNm2Zj5nADd4RodlotVaqUjSWzp2EmqIKLeEwJLuKxnNo2uWvXrrZ9zz33APDpp5/a\nmDmRdMOGDTZmhl+ppG2Tw5Hp68t14oknArHn55l92l5++eW09kXbJotIQjlVwbOVKng4dH0lpgou\nIgkpwUU8pgQX8ZgSXMRjSnARjynBRTymBBfxWLIPm6wFlqSiIzmsUaY74BFdX/GKdX0ltdBFRHKL\nhugiHlOCi3hMCS7iMSW4iMeU4CIeU4KLeEwJLuIxJbiIx5TgIh5Tgot4TAku4rGkHjbRpniJadPF\ncOj6SkybLopIQkpwEY8pwUU8pgQX8ZgSXMRjSnARjynBRTymBBfxmBJcxGNKcBGPKcFFPKYEF/GY\nElzEY8keXZQ2Bx98sG0/8MADALRp08bGTjrpJAB2796d3o6JF4YMGWLbp556KgALFiywsbvuuguA\nXD/5RxVcxGNZV8EHDx4MwH333WdjDRo0iHudqfDr1q1LT8fEC6ZaX3755TZWv359AGrXrm1jf/jD\nHwD4+9//nsbehU8VXMRjSnARj2XFEN0MkQAeffRRAKpXr25jiSY6HnvsMQCGDRtmY+vXr09VF8UT\nl1xyCQBHHXWUjW3evBmAIIjujNS4cWMAevToYWMTJ05Mef/Cpgou4rGsqODXX3+9bVerVq1Qf2fQ\noEEA9OrVy8bMxJyp7gC7du0Ko4uSw6677jrb7tatGwAHHRS99Dds2ABA6dKlbax79+4AHHrooTZW\np04dAEaPHp26zoZMFVzEY0pwEY8FyazUCXvf6kaNGgEwe/ZsG6tYsSIA33zzjY2tWrUKiK5e25fV\nq1cDsRMoK1euDKez+6F90cMR9vVlJmrdIXWHDh0AmDVrlo3NmTMHgIYNG9pYp06dAKhRo4aNffXV\nVwBcdtllNjZ9+vQwu5yQ9kUXkYQyOsnWrl07ACpVqmRjn332GRCd5AAoW7YsAP/3f/9nY7fccgsA\nTZs2tTEzCfLOO+/YmFm5pFtovz4XXHABAC1atLCxd999F4Cbb77Zxn766ScgOnqE6C0xd9K3WbNm\nQHSCF9JTwYtDFVzEY0pwEY9ldIhepkwZIHal2iOPPBL3uh07dgDwzDPP2NjAgQMBaNKkSdzrt23b\nZtu6D/7rZSZb3evBXENmWO7asmWLbZuJX/c+uPlV0V15ae6dZ+t1pgou4rGMVnB30szo06cPAG+/\n/fZ+/6653ZHIlClTbNv9qSy/LnXr1gVg06ZNNrZkyZJ9vr5169a2XaVKFSA6eoToSGDPnj02Vr58\neUAVXEQyQAku4rGMDtFffvllAE4//XQb69ixIwAtW7a0sSOOOAKAfv362VjVqlUB2LhxY1zMPBII\nMGrUKADmzp0bat8l+61duxaI3RHITM5+9NFHNlarVi0g9vqqV68eEDukN8P1EiWiddH8GuBeh9lE\nFVzEYxldi25WCS1cuNDGKleubD7LxhL10fwEdvfW
Gjt2LACHHXaYjT311FMADB06NKxux9Fa9HCE\nfX2deeaZAPTv39/GzEYO7qOhZgVkXl6ejZnbZEuXLrUxs7bdVHyIrry85557wux6DK1FF5GElOAi\nHsvoEN1wHwN9/fXXgehQHaJDdHenlptuugmIvU95//33A7EPEphJEvczFi1aFFrfC/qnIXoIUnV9\n9e3b17bPP/98IHYS1zyI9Nprr9nY888/D8ROqJ1zzjlA7MMm5p64+6ti2BO6GqKLSEJZUcFdptKa\nn5YQvQVxxx132FiiFWrlypUD4KWXXrIxcwvuhRdesDH32JowqIKHIx3Xlzn+yq3gkyZNAg68OYi5\nJfbwww/bWPv27YHoBC/AtddeG05nC6iCi0hCSnARj2XdED0MZ599tm2/+OKLAPz44482ZnaSCWuX\nFw3Rw5Er15f7K97VV18NxD7QcttttwHRoX9xaYguIgl5WcHdWxtmcs29tXH33XcD4a0+UgUPR65c\nX+4qS3M7zTwvAfDpp58CcOWVV4byeargIpKQElzEY14O0V1mQu3zzz+3MbO3VqtWrWzsu+++K/Jn\naIgejly8vs4999yYPyH62LK78tJdh5EsDdFFJCHvK7jhnjA5YsQIAN58800bO++88wDYvn170u+t\nCh6OXL6+/va3v9l2z549AVi+fLmNmZNvi3LrTBVcRBJSgot47FczRK9Zs6Ztmwk3c9YURCfj3JNO\nC0tD9HDk8vVlzsCD6P1v91AOc+DCn/70p6TfW0N0EUkoo7uqptOaNWts2zySunjxYhszG0gMHjw4\nrf0SP4wbN862TeU2OwRD7OOk6aQKLuIxJbiIx341k2yJfPDBB7Z97LHHAnDMMcfYWGH31tIkWzh8\nu77M6bkAO3fuLPL7aJJNRBL61UyyJTJgwADbnjVrFhB760zHHUlxFKdqh0UVXMRjSnARj/2qJ9nC\nokm2cOj6SkyTbCKSkBJcxGNKcBGPKcFFPJbsffC1wJJUdCSHNcp0Bzyi6ytesa6vpGbRRSS3aIgu\n4jEluIjHlOAiHlOCi3hMCS7iMSW4iMeU4CIeU4KLeEwJLuIxJbiIx5TgIh5Tgot4LKmnybSlTmLa\nsikcur4S05ZNIpKQElzEY0pwEY8pwUU8pgQX8ZgSXMRjSnARjynBRTzm5fHBVatWte2GDRvu83VL\nlkR36L3mmmsA+Pbbb23su+++A6JHC4v8UqNG+bsaH3fccTZWvXr1uNctWrQIgK+++srGVq1aleLe\nqYKLeC3nK3ifPn1s+/TTTwegR48eNtasWbN9/l1ToSH6k7hMmTJxrytZsmRxuyk5xlwPJ5xwgo31\n6tULiL1GGjRoAECdOnVs7OCDDwZgz549NpaXlwfA119/bWP//e9/AXjggQdC7btLFVzEY0pwEY/l\nxBC9adOmAFx++eU2dskllwBQrlw5GwuC5B66ad68eQi9E1+cf/75tt2vXz8Ajj32WBurWbMmADt3\n7rSxHTt2AFCiRLRWli1bNi5mhuvt2rWzsW3btoXV9X1SBRfxWE5U8Pr16wNw1VVXhfJ+8+fPB2DO\nnDmhvJ/kDjPK69Kli42ZSdlu3brZWKtWrYDYW67r168H4Mcff7SxTZs2AbBx40YbM5NsP//8s43t\n3bsXgB9++MHGRo8eXYzvpHBUwUU8pgQX8VhGh+g1atQAYofen3/+OQDjx4+3MTOpYYZDAFu3bgWg\nQoUKNvbBBx8AsavRpk6dCsTef9y+fXvMe4ifzBqIM844w8bMEN0dPpsVZbNnz7axBQsWALBlyxYb\nW7x4MRB7HZqh+cKFC23MTLy5K9V2795djO+k6FTBRTyW9gqeqOIeeeSRNmZuT7imTJkCQPv27W3M\n/DR115ovX74ciE5oyK/PsGHDbPu8884DYkd0zzzzDACTJk1Kb8cyRBVcxGNKcBGPpW2IXrp0aQBe\neuklGzND8/vv
v9/GPvroo32+hxmWu5YuXRpSDyWXnXjiiQDceeedNmYmUUeOHGljv5ahuaEKLuKx\nlFbwihUr2vYf//hHAE477TQbW7t2LQAPPfSQjaVjfa74Z+jQoUB0FRlER4PPPvtsJrqUFVTBRTym\nBBfxWEqH6GeeeaZt33zzzUDspNjxxx8PxK4MEimsWrVq2Xbnzp2B2GtpzJgxae9TtlEFF/FYSiu4\nu9Ok4a4JNyvPRIqiZ8+etm2q+fvvv29jX3zxRdr7lG1UwUU8pgQX8VhKh+gDBgyIi5mtZyG66uid\nd96xsZkzZ6ayS+IR9yEls1KySZMmNjZo0CAAnn76aRtbt25dmnqXHVTBRTwWRCKRwr84CAr/YsB9\n7/09wul+7YknngCij4hC9JFQ96H6RPupHX744UDs5Eo6JvIikUhy27lKQsleX//6179s+8ILLwRi\nN2hYtmwZELuRg9mPz+yvBrBy5Uog9rFS87oqVarYmNmfzRxiAImfjwhbca4vVXARjynBRTyW0iH6\niBEjbPvaa69N5q8Wy5o1a2x74sSJAJx99tkp+zwN0cOR7PU1ZMgQ2/7DH/4AQN26dW3M7B5k9uAD\n2Lx5MwClSpWKez93eG8eenL3UjNnkrm/Uk6bNg2AK6+8MpmuJ0VDdBFJKKUV3D2V86ijjgJiN3w4\n6KD8u3TmhEaIPe4lDOb7u+uuu2xs+PDhYX+GKngIkr2+XP379wegY8eONmZumbmPLZtr0p08q127\nNhB7rrc5Euunn36yMVP13deZXVXN5DDAHXfcUdRvIyFVcBFJSAku4rGUDtELy+ynBdFhkDukdodd\nReU+Ophoa+bi0BA9HGFfX+ZXQHMAAkTPHPvNb35jY+acMjNUh+ihBbNmzbIxc7pop06dbMzcG58w\nYYKNDR48GIDVq1eH8F1oiC4i+5AVp4u6P/0M9xxlU8HNGcsQ3cD+qaeesrGrr74agHPOOScl/ZTc\nYq4XsyrNbTdu3NjGevfuHfd3lyxZAsSueDP7vZmjiSC6tt09cTSsyh0GVXARjynBRTyWFUP0RMy5\nZQD33XcfEJ00AbjkkkuA2AkUc5B7Ito9RlxmBRpE93Fz12CYVXDNmze3sTp16sS9j3kAauzYsSnp\nZ3Gpgot4LCtukyViVhIB/Pvf/wbgt7/9baH+rvs433vvvQfAueeea2Nhnwuu22ThSOf15XrssceA\n2BGgmVAz54lDdC26e7a4qdzXXHNNyvqn22QikpASXMRjWTtEd5kVRu7eWh06dABiN783u2uMGjXK\nxtwVcamiIXo4MnV9mYdHHn74YRsz+wm6ay9WrFgBxO4Y9MgjjwCxu8aETUN0EUkoJyp4Iueddx4Q\nPbIG4O677wbSv5JIFTwcmb6+3LXo5tbshg0bbMysuBw/fnxa+6UKLiIJKcFFPJazQ/RsoiF6OHR9\nJaYhuogkpAQX8ZgSXMRjSnARjynBRTymBBfxmBJcxGNKcBGPKcFFPJbsnmxrgSWp6EgOa5TpDnhE\n11e8Yl1fSS1VFZHcoiG6iMeU4CIeU4KLeEwJLuIxJbiIx5TgIh5Tgot4TAku4jEluIjHlOAiHlOC\ni3hMCS7isaSeJtO+1YlpX/Rw6PpKTPuii0hCSnARjynBRTymBBfxmBJcxGNKcBGPKcFFPKYEF/GY\nElzEY0pwEY8pwUU8pgQX8ZgSXMRjSnARjynBRTyW7OmiGdWsWTPbrlGjBgD9+vWzsR49egCwd+9e\nG3viiScA+Pzzz21s4cKFqeym5KgSJaL17pRTTgGgc+fONtawYUMADjoomjbLli0DYOrUqTb2zjvv\npLSfyVAFF/GYElzEY0mdD57OLXXatGlj28OGDQPgrLPOsjEzRC+sPXv22PaCBQsAmDRpko1dddVV\nAOzatSvpvmrLpnCk8/qqVauWbV977bUAdO/e3cbKlCkDwCGHHGJjBx98MABly5
a1sU2bNgGwdu1a\nG5s8eTIAjz/+uI1Nnz69yH3Vlk0iklBWVPC2bdva9uWXXw7AoEGDbMz85HT9+OOPAHz22Wc29sMP\nPwBw44032tiMGTMA6NSpk42tWLECiK3qDzzwABCdlEuGKng4UnV9NWjQwLaHDBkCQJ8+fWysUaNG\nQLQaA8ybNw+AnTt32ljJkiUBqFmzpo2VL18eiJ3YNebPn2/br776KgDjxo1Luv+q4CKSkBJcxGMZ\nHaI/+eSTQOy97ESTZxMmTADgm2++sbFbbrkFgB07dsS9/uOPP7btyy67DIB///vfNtauXTsAVq1a\nZWPmHmedOnVsbM2aNYX6PjRED0fY19cdd9wBQP/+/W3M/Pu6k2JffPEFEHsve+zYsQBs2LDBxipX\nrgzE/krZoUMHAFq3bm1jTZo0AWKvZTPJdskll9jYtm3bCvV9aIguIgmlbSWbubXgToBdfPHFAARB\n9AeUqZojR460sREjRgCwdevWQn1W9erVbdtMjNx11102Nn78eCA6uSL+cP+d3Yk04+233wZgzJgx\nNt82Zm4AAAS1SURBVPbee+8V6r3NaHHatGk2ZlZXbt++3cYqVKgAxE7GmdtyFStWtLHCVvDiUAUX\n8ZgSXMRjaRuimwdBbrjhBhszQ3NzTxuiEyJffvllod7XDMEher/z+eeft7H3338fgKpVq8b9XfdX\ng1GjRgGwcePGQn2uZBczydWiRQsbq1SpEhC9pw3R4fWnn35a5M/q2bNn3Oc2b97cxszQfPPmzTZm\n7omvXr26yJ9bFKrgIh5LWwU3lTYvLy/ua+6KsmOOOQaAAQMG2FjLli3j/o6Z1GjVqpWNmbZ7C6R2\n7dr77JN7m2z48OEA7N69ez/fhWQr8wyBO6IzbbPaDODII48Eore8IPr4p3tLzKxgcyfFTLU2t1Qh\nOqHbtGnTuL58+OGHNvanP/0p+W8qBKrgIh5Tgot4LG0r2cqVKwfASy+9ZGMnnXQSEDuEMrtqJOqX\nO7x3h2KF4T4M8NZbbwFw5ZVX2ph5AKUotJItHGGsZDOr1yB6fdWrV8/GzMRbtWrVbMxcS+7KxUT3\nqM3r3GvP/HpZunRpGzNDc/f6cof/ydJKNhFJKKNr0atUqQLAzTffbGNdunQBYN26dTa2dOlSIPoQ\nPkQnS9zHQPfHfQzUrGMP65aYKng4wr6+Bg4cCMRuHmIqd6JnHlauXGnbZoWa+2xC3bp1gdjRoBn5\nuevYr7jiimL33aUKLiIJKcFFPJYVO7oUhVmtdu6558Z9zV1BZPbbevbZZ20s0b344tAQPRyZvr7c\nh0PMOoy+ffvamHnM2F2NZlZKml/7UkFDdBFJKKcOPnAfNT377LP3+bqhQ4fa9ssvv5zSPok/3FWP\n7du3B2JXUZqRn3ncGFJbucOgCi7iMSW4iMdyYohudn657bbbbMw9H8qYM2cOAG+++WZ6OiZe6Nq1\nKwDXXXedjZnHm93rzOwN6O42lO1UwUU8lrW3ydwVamZtr1lH7NqyZYttn3rqqUDsSaLpoNtk4cjU\n0VimIrsniZo15uYYIohW+JkzZ6aji5Zuk4lIQkpwEY9l7SSbu4Io0dDcbKF8+umn21i6h+aSu665\n5hrbNvupuY+Lzp07F4C//OUvNpbuoXkYVMFFPJZ1FdxUa3fVWiIvvvgiABMnTkx1l8Qj5hx4s1IN\nonuouXv5zZ49GyjaaaDZRBVcxGNKcBGPZcUQ3d2a1kxulCpVKu51ZtgEcPXVV6e+Y+IF9563WaHm\n7tRizhxzJ9lef/319HQuxVTBRTyWFRX8hBNOsO369esDiXdVdW9tJDoXXCQRt4KbfdXc00DNMwx/\n/vOfbcxdwZbLVMFFPKYEF/FYVgzR7733XttONDQfMWIEAB9//HHa+iT+OO6442zbTK65Bxt88skn\nAEyaNCm9HUsDVXARj2VFBXePkTFndrs7Vz
766KNp75P4wz0N1Jwe654Zbo6y8pEquIjHlOAiHsuK\nIfrDDz8c13Yn3opz8qfI2LFjbdvc8542bZqNLVq0KO19ShdVcBGPZe2ebLlEe7KFQ9dXYtqTTUQS\nUoKLeCzZSba1wJJUdCSHNcp0Bzyi6ytesa6vpH4HF5HcoiG6iMeU4CIeU4KLeEwJLuIxJbiIx5Tg\nIh5Tgot4TAku4jEluIjH/h+BrGxlPHNv5QAAAABJRU5ErkJggg==\n",
420 | "text/plain": [
421 | ""
422 | ]
423 | },
424 | "metadata": {},
425 | "output_type": "display_data"
426 | }
427 | ],
428 | "source": [
429 | "plot_images(disp_imgs, \"Auto-encoder\", images_to_plot)\n",
430 | "plt.show()"
431 | ]
432 | }
433 | ],
434 | "metadata": {
435 | "kernelspec": {
436 | "display_name": "Python 3",
437 | "language": "python",
438 | "name": "python3"
439 | },
440 | "language_info": {
441 | "codemirror_mode": {
442 | "name": "ipython",
443 | "version": 3
444 | },
445 | "file_extension": ".py",
446 | "mimetype": "text/x-python",
447 | "name": "python",
448 | "nbconvert_exporter": "python",
449 | "pygments_lexer": "ipython3",
450 | "version": "3.6.0"
451 | }
452 | },
453 | "nbformat": 4,
454 | "nbformat_minor": 2
455 | }
456 |
--------------------------------------------------------------------------------
/Denoising-Autoencoder/code/tf_dae_mnist_reconstruction.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 | from tensorflow.examples.tutorials.mnist import input_data
6 | import matplotlib.pyplot as plt
7 |
8 | # Util functions
9 |
10 |
# Render the test-phase image grid: `no_i_x` columns by `no_i_y` rows, where
# the first row's columns are labelled Real / Distorted / Reconstructed.
def plot_images(images, title, no_i_x, no_i_y=3):
    """Draw `images` (flattened 28x28 MNIST digits) on a grid figure.

    The figure is only built and annotated here; the caller is expected to
    invoke plt.show() afterwards.
    """
    column_titles = {0: "Real", 1: "Distorted", 2: "Reconstructed"}
    figure = plt.figure(figsize=(10, 15))
    figure.canvas.set_window_title(title)
    grid = np.array(images).reshape(-1, 28, 28)
    for col in range(no_i_x):
        for row in range(no_i_y):
            # subplot indices are 1-based; images were appended column-major
            axis = figure.add_subplot(no_i_x, no_i_y, no_i_x * row + col + 1)
            axis.matshow(grid[no_i_x * row + col], cmap="gray")
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))
            if row == 0 and col in column_titles:
                axis.set_title(column_titles[col])
29 |
30 |
# function to add noise to input data
def add_noise(data, mean=0, stddev=0.2):
    """Return a copy of `data` distorted with additive Gaussian noise.

    Draws element-wise N(mean, stddev) noise of the same shape as `data`,
    adds it, and clips the result to the valid pixel range [0, 1].

    Generalized to accept any array-like input (e.g. plain lists): the
    original read `data.shape` directly, so non-ndarray inputs raised an
    AttributeError that the ValueError fallback never caught.
    """
    data = np.asarray(data)
    try:
        noise = np.random.normal(mean, stddev, data.shape)
    except ValueError:
        # best-effort fallback: leave the data undistorted if sampling fails
        noise = np.zeros_like(data)

    # clip so distorted pixels remain valid MNIST intensities in [0, 1]
    return np.clip(data + noise, 0.0, 1.0)
42 |
43 |
# load the MNIST dataset via the tensorflow.examples helper (downloads into
# MNIST_data/ on the first run); labels are one-hot encoded
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# keep only the image matrices; labels are unused by this autoencoder
features_train, features_test, features_valid = \
    mnist.train.images, mnist.test.images, mnist.validation.images
48 |
# Neural Network Model
# 2 Encoder layers + 2 Decoder layers (all dense neural layers)

# Hyper-parameters
n_input_layer = features_train.shape[1]  # flattened pixels (28*28 = 784)
n_enc_hidden_1 = 400
n_enc_hidden_2 = 100  # 7.84 times compression
n_dec_hidden_1 = 100  # decoder mirrors the encoder widths in reverse
n_dec_hidden_2 = 400
n_output_layer = features_train.shape[1]  # reconstruction matches input size

learning_rate = 0.001

n_epoch = 10
batch_size = 20

test_disp = 10  # no of images in plot
66 |
# input/output placeholders
# X is fed the noise-distorted images, Y the clean originals (see the
# training loop's feed_dict below)
X = tf.placeholder(tf.float32, [None, n_input_layer])
Y = tf.placeholder(tf.float32, [None, n_output_layer])

# Weights and biases
# each layer is a dict of {'weights', 'biases'} initialized from a
# zero-mean normal with small stddev to keep early activations unsaturated
enc_layer_1 = {
    'weights':
    tf.Variable(tf.random_normal([n_input_layer, n_enc_hidden_1], stddev=0.1)),
    'biases':
    tf.Variable(tf.random_normal([n_enc_hidden_1], stddev=0.1))
}
enc_layer_2 = {
    'weights':
    tf.Variable(
        tf.random_normal([n_enc_hidden_1, n_enc_hidden_2], stddev=0.1)),
    'biases':
    tf.Variable(tf.random_normal([n_enc_hidden_2], stddev=0.1))
}
dec_layer_1 = {
    'weights':
    tf.Variable(
        tf.random_normal([n_dec_hidden_1, n_dec_hidden_2], stddev=0.1)),
    'biases':
    tf.Variable(tf.random_normal([n_dec_hidden_2], stddev=0.1))
}
dec_layer_2 = {
    'weights':
    tf.Variable(
        tf.random_normal([n_dec_hidden_2, n_output_layer], stddev=0.1)),
    'biases':
    tf.Variable(tf.random_normal([n_output_layer], stddev=0.1))
}
99 |
# Model

# Encoder: 784 -> 400 (sigmoid) -> 100 (tanh) bottleneck code
h_enc_1 = tf.add(tf.matmul(X, enc_layer_1['weights']), enc_layer_1['biases'])
enc_1 = tf.nn.sigmoid(h_enc_1)

h_enc_2 = tf.add(
    tf.matmul(enc_1, enc_layer_2['weights']), enc_layer_2['biases'])
enc_2 = tf.nn.tanh(h_enc_2)

# Decoder: 100 -> 400 (tanh) -> 784; final sigmoid maps the
# reconstruction back into the [0, 1] pixel range
h_dec_1 = tf.add(
    tf.matmul(enc_2, dec_layer_1['weights']), dec_layer_1['biases'])
dec_1 = tf.nn.tanh(h_dec_1)

h_dec_2 = tf.add(
    tf.matmul(dec_1, dec_layer_2['weights']), dec_layer_2['biases'])
dec_2 = tf.nn.sigmoid(h_dec_2)
118 |
# Error and Optimizer

# mean-squared error between the reconstruction and the clean target
cost = tf.losses.mean_squared_error(labels=Y, predictions=dec_2)

# adam-optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# Training

# Make Batches
# NOTE(review): np.split requires batch_size to evenly divide the number of
# training examples, otherwise it raises — holds for MNIST's 55000 / 20
n_batch = features_train.shape[0] // batch_size
batched_data = np.split(features_train, n_batch)
132 |
# Start session
with tf.Session() as sess:
    tf.global_variables_initializer().run()

    print("*********** Train ***********")

    # Epoch-training
    for epoch in range(n_epoch):
        tr_err = []

        # Batch training: feed the noisy batch as input, the clean batch as
        # the reconstruction target
        for b_idx in range(n_batch):
            noisy_data = add_noise(batched_data[b_idx])
            e, _ = sess.run([cost, optimizer], feed_dict={
                X: noisy_data,
                Y: batched_data[b_idx]
            })

            tr_err.append(e)

        # evaluate on a freshly-noised validation set after each epoch
        noisy_vaild_data = add_noise(features_valid)
        val_err = cost.eval({X: noisy_vaild_data, Y: features_valid})

        # report the mean per-batch training error for the epoch
        train_err = sum(tr_err) / len(tr_err)
        print("Epoch:", epoch, " Train-error:", train_err,
              " Validation-error:", val_err)

    print("\n*********** Test ***********")

    # Test the model on test data and try to reconstruct it
    noisy_test = add_noise(features_test)
    reconstructed_imgs, err = sess.run([dec_2, cost], feed_dict={
        X: noisy_test,
        Y: features_test
    })
    # interleave real / distorted / reconstructed so plot_images can lay
    # them out as labelled columns
    disp_imgs = []
    for i in range(test_disp):
        disp_imgs.append(features_test[i])
        disp_imgs.append(noisy_test[i])
        disp_imgs.append(reconstructed_imgs[i])

    # Plot original, noisy and reconstructed images
    print("Test Error: %.8f" % err)

    # plot output
    plot_images(disp_imgs, "De-noising Auto-encoder", test_disp)
    plt.show()
180 |
--------------------------------------------------------------------------------
/Feedforward-NeuralNet/code/np_nn_iris_classification.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import numpy as np
4 | import random
5 | import urllib.request
6 |
7 | # Download iris dataset
8 |
9 | urllib.request.urlretrieve(
10 |     "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
11 |     "iris-data.txt")  # saved into the current working directory
12 | 
13 | # Pre-process data
14 | 
15 | # seed random-generators
16 | random.seed(0)
17 | np.random.seed(0)
18 | 
19 | train_test_ratio = 0.8
20 | 
21 | tmp_list = []
22 | tmp_set = set()  # collects the distinct class names
23 | features = []
24 | labels = []
25 | 
26 | # text-file to numpy arrays
27 | with open("iris-data.txt") as f:
28 |     for line in f.readlines():
29 |         if not line.isspace():  # skip blank lines
30 |             tmp_list.append(line)
31 | 
32 | random.shuffle(tmp_list)
33 | 
34 | for line in tmp_list:
35 |     split_line = line.strip().split(',')
36 |     length_line = len(split_line)
37 | 
38 |     for i in range(length_line - 1):
39 |         split_line[i] = float(split_line[i])
40 | 
41 |     label = split_line[length_line - 1]  # class name is the last field
42 |     tmp_set.add(label)
43 | 
44 |     features.append(split_line[:length_line - 1])
45 |     labels.append(label)
46 | 
47 | # Scale data to [0, 1] using the global min/max over all four features
48 | max_val = max([item for i in features for item in i])
49 | min_val = min([item for i in features for item in i])
50 | 
51 | for i in range(len(features)):
52 |     for j in range(len(features[0])):
53 |         features[i][j] = (features[i][j] - min_val) / (max_val - min_val)
54 | 
55 | # One-hot encoding
56 | tmp_list = list(tmp_set)  # set iteration order fixes the class -> index mapping
57 | for i in range(len(labels)):
58 |     labels[i] = tmp_list.index(labels[i])
59 | 
60 | label_idx = np.array(labels)
61 | labels = np.zeros((len(labels), len(tmp_list)))
62 | labels[np.arange(len(labels)), label_idx] = 1  # row i gets a 1 at its class index
63 | 
64 | # split into train-test set (data was shuffled above, so the split is random)
65 | features_train = np.array(features[:int(train_test_ratio * len(features))])
66 | features_test = np.array(features[int(train_test_ratio * len(features)):])
67 | 
68 | labels_train = labels[:int(train_test_ratio * len(labels))]
69 | labels_test = labels[int(train_test_ratio * len(labels)):]
70 |
71 | # Neural Network
72 | 
73 | # hyper-parameters
74 | n_input_layers = len(features_test[0])  # number of input features
75 | n_hidden_layers = 5
76 | n_output_layers = len(tmp_list)  # one output unit per class
77 | 
78 | learning_rate = 0.01
79 | momentum = 0.9
80 | 
81 | n_epoch = 100
82 | 
83 | # Activation Functions and their derivative
84 | activation_f = {
85 |     'identity': lambda x: x,
86 |     'sigmoid': lambda x: 1.0 / (1.0 + np.exp(-x)),
87 |     'tanh': lambda x: np.tanh(x),
88 |     'relu': lambda x: x * (x > 0),
89 | }
90 | 
91 | activation_f_prime = {  # NOTE: each takes the activation OUTPUT f(x), not x
92 |     'identity': lambda x: 1,
93 |     'sigmoid': lambda x: x * (1.0 - x),  # y*(1-y) where y = sigmoid(x)
94 |     'tanh': lambda x: 1 - x**2,  # 1 - y^2 where y = tanh(x)
95 |     'relu': lambda x: 1.0 * (x > 0),
96 | }
97 | 
98 | # Activation Function Parameters
99 | f1 = 'tanh'  # hidden-layer activation
100 | f2 = 'sigmoid'  # output-layer activation
101 | 
102 | act_f1 = activation_f[f1]
103 | act_f2 = activation_f[f2]
104 | 
105 | act_f1_prime = activation_f_prime[f1]
106 | act_f2_prime = activation_f_prime[f2]
107 |
108 |
109 | # Training Function
110 | def train(input_features, output_label, i_h_weights, h_o_weights):
111 | input_features = input_features.reshape(1, -1)
112 |
113 | # forward prop
114 | h_inter = np.dot(input_features, i_h_weights)
115 | h_result = act_f1(h_inter)
116 | o_inter = np.dot(h_result, h_o_weights)
117 | o_result = act_f2(o_inter)
118 |
119 | error = np.mean(0.5 * np.square(o_result - output_label))
120 |
121 | # back prop
122 | del_h_o = -np.multiply(output_label - o_result, act_f2_prime(o_result))
123 | change_h_o = np.dot(h_result.T, del_h_o)
124 | del_i_h = np.dot(del_h_o, h_o_weights.T) * act_f1_prime(h_result)
125 | change_i_h = np.dot(input_features.T, del_i_h)
126 |
127 | return error, change_i_h, change_h_o
128 |
129 |
130 | # Predict Function
131 | def predict(input_features, i_h_weights, h_o_weights):
132 | # uses just forward prop
133 | h_inter = np.dot(input_features, i_h_weights)
134 | h_result = act_f1(h_inter)
135 | o_inter = np.dot(h_result, h_o_weights)
136 | o_result = act_f2(o_inter)
137 | return (o_result >= max(o_result)).astype(int)
138 |
139 |
140 | # Train Neural Network
141 |
142 | print("*********** Train ***********")
143 |
144 | # Initial Random Weights
145 | V = np.random.normal(scale=0.1, size=(n_input_layers, n_hidden_layers))
146 | W = np.random.normal(scale=0.1, size=(n_hidden_layers, n_output_layers))
147 |
148 | # Training-set
149 | X = features_train
150 | T = labels_train
151 |
152 | # Epoch-training
153 | for epoch in range(n_epoch):
154 | tr_err = []
155 |
156 | for i in range(X.shape[0]):
157 | loss, grad_V, grad_W = train(X[i], T[i], V, W)
158 |
159 | # Adjust Weights
160 | V -= learning_rate * grad_V + momentum * grad_V
161 | W -= learning_rate * grad_W + momentum * grad_W
162 |
163 | tr_err.append(loss)
164 |
165 | if epoch % 10 == 0:
166 | val_err = []
167 |
168 | # use test set as validiation set
169 | for i in range(features_test.shape[0]):
170 | loss, _, _ = train(features_test[i], labels_test[i], V, W)
171 | val_err.append(loss)
172 |
173 | train_error = sum(tr_err) / len(tr_err)
174 | valid_error = sum(val_err) / len(val_err)
175 |
176 | print("Epoch:", epoch, " Train-error:", train_error,
177 | " Validation-error:", valid_error)
178 |
179 | # Test Neural Network
180 | 
181 | print("*********** Test ***********")
182 | 
183 | success = 0
184 | for i in range(len(features_test)):
185 |     a = predict(features_test[i], V, W)  # one-hot-style prediction
186 |     b = labels_test[i]
187 |     if np.array_equal(a, b):  # exact match with the one-hot label counts as success
188 |         success += 1
189 | 
190 | print("Total = %d Success = %d Accuracy = %f" %
191 |       (len(features_test), success, success * 100 / len(features_test)))
192 |
--------------------------------------------------------------------------------
/Feedforward-NeuralNet/code/tf_nn_iris_classification.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import numpy as np
4 | import random
5 | import tensorflow as tf
6 | import urllib.request
7 |
8 | # Download iris dataset
9 | 
10 | urllib.request.urlretrieve(
11 |     "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
12 |     "iris-data.txt")  # saved into the current working directory
13 | 
14 | # Pre-process data
15 | 
16 | # seed random-generators
17 | random.seed(0)
18 | np.random.seed(0)
19 | 
20 | train_test_ratio = 0.8
21 | 
22 | tmp_list = []
23 | tmp_set = set()  # collects the distinct class names
24 | features = []
25 | labels = []
26 | 
27 | # text-file to numpy arrays
28 | with open("iris-data.txt") as f:
29 |     for line in f.readlines():
30 |         if not line.isspace():  # skip blank lines
31 |             tmp_list.append(line)
32 | 
33 | random.shuffle(tmp_list)
34 | 
35 | for line in tmp_list:
36 |     split_line = line.strip().split(',')
37 |     length_line = len(split_line)
38 | 
39 |     for i in range(length_line - 1):
40 |         split_line[i] = float(split_line[i])
41 | 
42 |     label = split_line[length_line - 1]  # class name is the last field
43 |     tmp_set.add(label)
44 | 
45 |     features.append(split_line[:length_line - 1])
46 |     labels.append(label)
47 | 
48 | # Scale data to [0, 1] using the global min/max over all four features
49 | max_val = max([item for i in features for item in i])
50 | min_val = min([item for i in features for item in i])
51 | 
52 | for i in range(len(features)):
53 |     for j in range(len(features[0])):
54 |         features[i][j] = (features[i][j] - min_val) / (max_val - min_val)
55 | 
56 | # One-hot encoding
57 | tmp_list = list(tmp_set)  # set iteration order fixes the class -> index mapping
58 | for i in range(len(labels)):
59 |     labels[i] = tmp_list.index(labels[i])
60 | 
61 | label_idx = np.array(labels)
62 | labels = np.zeros((len(labels), len(tmp_list)))
63 | labels[np.arange(len(labels)), label_idx] = 1  # row i gets a 1 at its class index
64 | 
65 | # split into train-test set (data was shuffled above, so the split is random)
66 | features_train = np.array(features[:int(train_test_ratio * len(features))])
67 | features_test = np.array(features[int(train_test_ratio * len(features)):])
68 | 
69 | labels_train = labels[:int(train_test_ratio * len(labels))]
70 | labels_test = labels[int(train_test_ratio * len(labels)):]
71 |
72 | # Neural Network
73 | 
74 | # hyper-parameters
75 | n_input_layers = len(features_test[0])  # number of input features
76 | n_hidden_layers_1 = 5
77 | n_output_layers = len(tmp_list)  # one output unit per class
78 | 
79 | learning_rate = 0.01
80 | momentum = 0.9
81 | 
82 | n_epochs = 100
83 | 
84 | # input/output placeholders
85 | X = tf.placeholder(tf.float32, [None, n_input_layers])
86 | Y = tf.placeholder(tf.float32)  # one-hot labels; shape left unconstrained
87 | 
88 | # Weights and biases
89 | layer_1 = {
90 |     'weights':
91 |     tf.Variable(
92 |         tf.random_normal([n_input_layers, n_hidden_layers_1], stddev=0.1)),
93 |     'biases':
94 |     tf.Variable(tf.random_normal([n_hidden_layers_1], stddev=0.1))
95 | }
96 | layer_op = {
97 |     'weights':
98 |     tf.Variable(
99 |         tf.random_normal([n_hidden_layers_1, n_output_layers], stddev=0.1)),
100 |     'biases':
101 |     tf.Variable(tf.random_normal([n_output_layers], stddev=0.1))
102 | }
103 | 
104 | # Model
105 | h_l1 = tf.nn.xw_plus_b(X, layer_1['weights'], layer_1['biases'])  # X.W + b
106 | l1 = tf.nn.tanh(h_l1)
107 | 
108 | h_l2 = tf.nn.xw_plus_b(l1, layer_op['weights'], layer_op['biases'])
109 | op = tf.nn.sigmoid(h_l2)  # per-class score in (0, 1)
110 | 
111 | # Error and Optimizer
112 | 
113 | # mean-squared error
114 | err = tf.losses.mean_squared_error(predictions=op, labels=Y)
115 | 
116 | # gradient-descent-with-momentum-optimizer
117 | optimizer = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(err)
118 |
119 | # Start Session
120 | with tf.Session() as sess:
121 | 
122 |     tf.global_variables_initializer().run()
123 | 
124 |     print("*********** Train ***********")
125 | 
126 |     # Epoch training
127 |     for epoch in range(n_epochs):
128 | 
129 |         tr_err = []
130 | 
131 |         for i in range(len(features_train)):  # one sample per optimizer step (pure SGD)
132 |             _, error = sess.run([optimizer, err], feed_dict={
133 |                 X: features_train[i].reshape(1, -1),
134 |                 Y: labels_train[i]
135 |             })
136 |             tr_err.append(error)
137 | 
138 |         if epoch % 10 == 0:  # report every 10th epoch
139 |             # use test set for validation
140 |             val_err = err.eval({X: features_test, Y: labels_test})
141 |             train_err = sum(tr_err) / len(tr_err)
142 |             print("Epoch:", epoch, " Train-error:", train_err,
143 |                   " Validation-error:", val_err)
144 | 
145 |     print("*********** Test ***********")
146 | 
147 |     correct = tf.equal(tf.argmax(op, 1), tf.argmax(Y, 1))  # predicted class == true class
148 |     accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
149 |     test_error = err.eval({X: features_test, Y: labels_test})
150 |     test_accuracy = accuracy.eval({X: features_test, Y: labels_test}) * 100  # percent
151 |     print('Test-error:', test_error, 'Accuracy:', test_accuracy)
152 |
--------------------------------------------------------------------------------
/Feedforward-NeuralNet/np_nn_iris_classification.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Artificial Neural Network\n",
8 | "###### iris classification using numpy"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "Import dependencies"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 1,
21 | "metadata": {
22 | "collapsed": true
23 | },
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import random\n",
28 | "import urllib.request"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {},
34 | "source": [
35 | "#### Download iris dataset"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 2,
41 | "metadata": {},
42 | "outputs": [
43 | {
44 | "data": {
45 | "text/plain": [
46 | "('iris-data.txt', )"
47 | ]
48 | },
49 | "execution_count": 2,
50 | "metadata": {},
51 | "output_type": "execute_result"
52 | }
53 | ],
54 | "source": [
55 | "urllib.request.urlretrieve(\n",
56 | " \"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\", \n",
57 | " \"iris-data.txt\")"
58 | ]
59 | },
60 | {
61 | "cell_type": "markdown",
62 | "metadata": {},
63 | "source": [
64 | "### Pre-process data"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "seed random-generators"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 3,
77 | "metadata": {
78 | "collapsed": true
79 | },
80 | "outputs": [],
81 | "source": [
82 | "random.seed(0)\n",
83 | "np.random.seed(0)"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": 4,
89 | "metadata": {
90 | "collapsed": true
91 | },
92 | "outputs": [],
93 | "source": [
94 | "train_test_ratio = 0.8\n",
95 | "\n",
96 | "tmp_list = []\n",
97 | "tmp_set = set()\n",
98 | "features = []\n",
99 | "labels = []"
100 | ]
101 | },
102 | {
103 | "cell_type": "markdown",
104 | "metadata": {},
105 | "source": [
106 | "text-file to numpy arrays"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 5,
112 | "metadata": {
113 | "collapsed": true
114 | },
115 | "outputs": [],
116 | "source": [
117 | "with open(\"iris-data.txt\") as f:\n",
118 | " for line in f.readlines():\n",
119 | " if not line.isspace():\n",
120 | " tmp_list.append(line)\n",
121 | "\n",
122 | " random.shuffle(tmp_list)\n",
123 | "\n",
124 | "for line in tmp_list:\n",
125 | " split_line = line.strip().split(',')\n",
126 | " length_line = len(split_line)\n",
127 | "\n",
128 | " for i in range(length_line - 1):\n",
129 | " split_line[i] = float(split_line[i])\n",
130 | "\n",
131 | " label = split_line[length_line - 1]\n",
132 | " tmp_set.add(label)\n",
133 | "\n",
134 | " features.append(split_line[:length_line - 1])\n",
135 | " labels.append(label)"
136 | ]
137 | },
138 | {
139 | "cell_type": "markdown",
140 | "metadata": {},
141 | "source": [
142 | "Scale data"
143 | ]
144 | },
145 | {
146 | "cell_type": "code",
147 | "execution_count": 6,
148 | "metadata": {
149 | "collapsed": true
150 | },
151 | "outputs": [],
152 | "source": [
153 | "max_val = max([item for i in features for item in i])\n",
154 | "min_val = min([item for i in features for item in i])\n",
155 | "\n",
156 | "for i in range(len(features)):\n",
157 | " for j in range(len(features[0])):\n",
158 | " features[i][j] = (features[i][j] - min_val) / (max_val - min_val)"
159 | ]
160 | },
161 | {
162 | "cell_type": "markdown",
163 | "metadata": {},
164 | "source": [
165 | "One-hot encoding"
166 | ]
167 | },
168 | {
169 | "cell_type": "code",
170 | "execution_count": 7,
171 | "metadata": {
172 | "collapsed": true
173 | },
174 | "outputs": [],
175 | "source": [
176 | "tmp_list = list(tmp_set)\n",
177 | "for i in range(len(labels)):\n",
178 | " labels[i] = tmp_list.index(labels[i])\n",
179 | "\n",
180 | "label_idx = np.array(labels)\n",
181 | "labels = np.zeros((len(labels), len(tmp_list)))\n",
182 | "labels[np.arange(len(labels)), label_idx] = 1"
183 | ]
184 | },
185 | {
186 | "cell_type": "markdown",
187 | "metadata": {},
188 | "source": [
189 | "split into train-test set"
190 | ]
191 | },
192 | {
193 | "cell_type": "code",
194 | "execution_count": 8,
195 | "metadata": {
196 | "collapsed": true
197 | },
198 | "outputs": [],
199 | "source": [
200 | "features_train = np.array(features[:int(train_test_ratio * len(features))])\n",
201 | "features_test = np.array(features[int(train_test_ratio * len(features)):])\n",
202 | "\n",
203 | "labels_train = labels[:int(train_test_ratio * len(labels))]\n",
204 | "labels_test = labels[int(train_test_ratio * len(labels)):]"
205 | ]
206 | },
207 | {
208 | "cell_type": "markdown",
209 | "metadata": {},
210 | "source": [
211 | "## Neural Network"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "metadata": {},
217 | "source": [
218 | "hyper-parameters"
219 | ]
220 | },
221 | {
222 | "cell_type": "code",
223 | "execution_count": 9,
224 | "metadata": {
225 | "collapsed": true
226 | },
227 | "outputs": [],
228 | "source": [
229 | "n_input_layers = len(features_test[0])\n",
230 | "n_hidden_layers = 5\n",
231 | "n_output_layers = len(tmp_list)\n",
232 | "\n",
233 | "learning_rate = 0.01\n",
234 | "momentum = 0.9\n",
235 | "\n",
236 | "n_epoch = 100"
237 | ]
238 | },
239 | {
240 | "cell_type": "markdown",
241 | "metadata": {},
242 | "source": [
243 | "Activation Functions and their derivative"
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": 10,
249 | "metadata": {
250 | "collapsed": true
251 | },
252 | "outputs": [],
253 | "source": [
254 | "activation_f = {\n",
255 | " 'identity': lambda x: x,\n",
256 | " 'sigmoid': lambda x: 1.0 / (1.0 + np.exp(-x)),\n",
257 | " 'tanh': lambda x: np.tanh(x),\n",
258 | " 'relu': lambda x: x * (x > 0),\n",
259 | "}\n",
260 | "\n",
261 | "activation_f_prime = {\n",
262 | " 'identity': lambda x: 1,\n",
263 | " 'sigmoid': lambda x: x * (1.0 - x),\n",
264 | " 'tanh': lambda x: 1 - x**2,\n",
265 | " 'relu': lambda x: 1.0 * (x > 0),\n",
266 | "}"
267 | ]
268 | },
269 | {
270 | "cell_type": "markdown",
271 | "metadata": {},
272 | "source": [
273 | "Activation Function Parameters"
274 | ]
275 | },
276 | {
277 | "cell_type": "code",
278 | "execution_count": 11,
279 | "metadata": {
280 | "collapsed": true
281 | },
282 | "outputs": [],
283 | "source": [
284 | "f1 = 'tanh'\n",
285 | "f2 = 'sigmoid'\n",
286 | "\n",
287 | "act_f1 = activation_f[f1]\n",
288 | "act_f2 = activation_f[f2]\n",
289 | "\n",
290 | "act_f1_prime = activation_f_prime[f1]\n",
291 | "act_f2_prime = activation_f_prime[f2]"
292 | ]
293 | },
294 | {
295 | "cell_type": "markdown",
296 | "metadata": {},
297 | "source": [
298 | "#### Training Function"
299 | ]
300 | },
301 | {
302 | "cell_type": "code",
303 | "execution_count": 12,
304 | "metadata": {
305 | "collapsed": true
306 | },
307 | "outputs": [],
308 | "source": [
309 | "def train(input_features, output_label, i_h_weights, h_o_weights):\n",
310 | " input_features = input_features.reshape(1, -1)\n",
311 | "\n",
312 | " # forward prop\n",
313 | " h_inter = np.dot(input_features, i_h_weights)\n",
314 | " h_result = act_f1(h_inter)\n",
315 | " o_inter = np.dot(h_result, h_o_weights)\n",
316 | " o_result = act_f2(o_inter)\n",
317 | "\n",
318 | " error = np.mean(0.5 * np.square(o_result - output_label))\n",
319 | "\n",
320 | " # back prop\n",
321 | " del_h_o = -np.multiply(output_label - o_result, act_f2_prime(o_result))\n",
322 | " change_h_o = np.dot(h_result.T, del_h_o)\n",
323 | " del_i_h = np.dot(del_h_o, h_o_weights.T) * act_f1_prime(h_result)\n",
324 | " change_i_h = np.dot(input_features.T, del_i_h)\n",
325 | "\n",
326 | " return error, change_i_h, change_h_o"
327 | ]
328 | },
329 | {
330 | "cell_type": "markdown",
331 | "metadata": {},
332 | "source": [
333 | "#### Predict Function"
334 | ]
335 | },
336 | {
337 | "cell_type": "code",
338 | "execution_count": 13,
339 | "metadata": {
340 | "collapsed": true
341 | },
342 | "outputs": [],
343 | "source": [
344 | "# uses just forward prop\n",
345 | "def predict(input_features, i_h_weights, h_o_weights):\n",
346 | " h_inter = np.dot(input_features, i_h_weights)\n",
347 | " h_result = act_f1(h_inter)\n",
348 | " o_inter = np.dot(h_result, h_o_weights)\n",
349 | " o_result = act_f2(o_inter)\n",
350 | " return (o_result >= max(o_result)).astype(int)"
351 | ]
352 | },
353 | {
354 | "cell_type": "markdown",
355 | "metadata": {},
356 | "source": [
357 | "### Train Neural Network"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": 14,
363 | "metadata": {},
364 | "outputs": [
365 | {
366 | "name": "stdout",
367 | "output_type": "stream",
368 | "text": [
369 | "*********** Train ***********\n",
370 | "Epoch: 0 Train-error: 0.110355817438 Validation-error: 0.0890507943417\n",
371 | "Epoch: 10 Train-error: 0.0426840919513 Validation-error: 0.0397109277993\n",
372 | "Epoch: 20 Train-error: 0.0221095706162 Validation-error: 0.020960218994\n",
373 | "Epoch: 30 Train-error: 0.016501428106 Validation-error: 0.0114677158671\n",
374 | "Epoch: 40 Train-error: 0.0143184820028 Validation-error: 0.0135157142136\n",
375 | "Epoch: 50 Train-error: 0.0120528834448 Validation-error: 0.00838103175258\n",
376 | "Epoch: 60 Train-error: 0.00950958946663 Validation-error: 0.0203980764745\n",
377 | "Epoch: 70 Train-error: 0.00861524656063 Validation-error: 0.0084052920011\n",
378 | "Epoch: 80 Train-error: 0.0084945926808 Validation-error: 0.00795761149001\n",
379 | "Epoch: 90 Train-error: 0.00819203245711 Validation-error: 0.00802635459742\n"
380 | ]
381 | }
382 | ],
383 | "source": [
384 | "print(\"*********** Train ***********\")\n",
385 | "\n",
386 | "# Initial Random Weights\n",
387 | "V = np.random.normal(scale=0.1, size=(n_input_layers, n_hidden_layers))\n",
388 | "W = np.random.normal(scale=0.1, size=(n_hidden_layers, n_output_layers))\n",
389 | "\n",
390 | "# Training-set\n",
391 | "X = features_train\n",
392 | "T = labels_train\n",
393 | "\n",
394 | "# Epoch-training\n",
395 | "for epoch in range(n_epoch):\n",
396 | " tr_err = []\n",
397 | "\n",
398 | " for i in range(X.shape[0]):\n",
399 | " loss, grad_V, grad_W = train(X[i], T[i], V, W)\n",
400 | "\n",
401 | " # Adjust Weights\n",
402 | " V -= learning_rate * grad_V + momentum * grad_V\n",
403 | " W -= learning_rate * grad_W + momentum * grad_W\n",
404 | "\n",
405 | " tr_err.append(loss)\n",
406 | " \n",
407 | " if epoch % 10 == 0:\n",
408 | " val_err = []\n",
409 | " \n",
410 |     "        # use test set as validation set\n",
411 | " for i in range(features_test.shape[0]):\n",
412 | " loss, _, _ = train(features_test[i], labels_test[i], V, W)\n",
413 | " val_err.append(loss)\n",
414 | " \n",
415 | " train_error = sum(tr_err) / len(tr_err)\n",
416 | " valid_error = sum(val_err) / len(val_err)\n",
417 | " \n",
418 | " print(\"Epoch:\", epoch, \" Train-error:\", train_error, \" Validation-error:\", valid_error)"
419 | ]
420 | },
421 | {
422 | "cell_type": "markdown",
423 | "metadata": {},
424 | "source": [
425 | "### Test Neural Network"
426 | ]
427 | },
428 | {
429 | "cell_type": "code",
430 | "execution_count": 15,
431 | "metadata": {},
432 | "outputs": [
433 | {
434 | "name": "stdout",
435 | "output_type": "stream",
436 | "text": [
437 | "*********** Test ***********\n",
438 | "Total = 30 Success = 29 Accuracy = 96.666667\n"
439 | ]
440 | }
441 | ],
442 | "source": [
443 | "print(\"*********** Test ***********\")\n",
444 | "\n",
445 | "success = 0\n",
446 | "for i in range(len(features_test)):\n",
447 | " a = predict(features_test[i], V, W)\n",
448 | " b = labels_test[i]\n",
449 | " if np.array_equal(a, b):\n",
450 | " success += 1\n",
451 | "\n",
452 | "print(\"Total = %d Success = %d Accuracy = %f\" %\n",
453 | " (len(features_test), success, success * 100 / len(features_test)))"
454 | ]
455 | }
456 | ],
457 | "metadata": {
458 | "kernelspec": {
459 | "display_name": "Python 3",
460 | "language": "python",
461 | "name": "python3"
462 | },
463 | "language_info": {
464 | "codemirror_mode": {
465 | "name": "ipython",
466 | "version": 3
467 | },
468 | "file_extension": ".py",
469 | "mimetype": "text/x-python",
470 | "name": "python",
471 | "nbconvert_exporter": "python",
472 | "pygments_lexer": "ipython3",
473 | "version": "3.6.2"
474 | }
475 | },
476 | "nbformat": 4,
477 | "nbformat_minor": 2
478 | }
479 |
--------------------------------------------------------------------------------
/Feedforward-NeuralNet/tf_nn_iris_classification.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Artificial Neural Network\n",
8 | "###### iris classification using Tensorflow"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "Import dependencies"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 1,
21 | "metadata": {
22 | "collapsed": true
23 | },
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import random\n",
28 | "import tensorflow as tf\n",
29 | "import urllib.request"
30 | ]
31 | },
32 | {
33 | "cell_type": "markdown",
34 | "metadata": {},
35 | "source": [
36 | "Download iris dataset"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 2,
42 | "metadata": {},
43 | "outputs": [
44 | {
45 | "data": {
46 | "text/plain": [
47 | "('iris-data.txt', )"
48 | ]
49 | },
50 | "execution_count": 2,
51 | "metadata": {},
52 | "output_type": "execute_result"
53 | }
54 | ],
55 | "source": [
56 | "urllib.request.urlretrieve(\n",
57 | " \"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\", \n",
58 | " \"iris-data.txt\")"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "### Pre-process data"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {},
71 | "source": [
72 | "seed random-generators"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": 3,
78 | "metadata": {
79 | "collapsed": true
80 | },
81 | "outputs": [],
82 | "source": [
83 | "random.seed(0)\n",
84 | "np.random.seed(0)"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": 4,
90 | "metadata": {
91 | "collapsed": true
92 | },
93 | "outputs": [],
94 | "source": [
95 | "train_test_ratio = 0.8\n",
96 | "\n",
97 | "tmp_list = []\n",
98 | "tmp_set = set()\n",
99 | "features = []\n",
100 | "labels = []"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "metadata": {},
106 | "source": [
107 | "text-file to numpy arrays"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": 5,
113 | "metadata": {
114 | "collapsed": true
115 | },
116 | "outputs": [],
117 | "source": [
118 | "with open(\"iris-data.txt\") as f:\n",
119 | " for line in f.readlines():\n",
120 | " if not line.isspace():\n",
121 | " tmp_list.append(line)\n",
122 | "\n",
123 | " random.shuffle(tmp_list)\n",
124 | "\n",
125 | "for line in tmp_list:\n",
126 | " split_line = line.strip().split(',')\n",
127 | " length_line = len(split_line)\n",
128 | "\n",
129 | " for i in range(length_line - 1):\n",
130 | " split_line[i] = float(split_line[i])\n",
131 | "\n",
132 | " label = split_line[length_line - 1]\n",
133 | " tmp_set.add(label)\n",
134 | "\n",
135 | " features.append(split_line[:length_line - 1])\n",
136 | " labels.append(label)"
137 | ]
138 | },
139 | {
140 | "cell_type": "markdown",
141 | "metadata": {},
142 | "source": [
143 | "Scale data"
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": 6,
149 | "metadata": {
150 | "collapsed": true
151 | },
152 | "outputs": [],
153 | "source": [
154 | "max_val = max([item for i in features for item in i])\n",
155 | "min_val = min([item for i in features for item in i])\n",
156 | "\n",
157 | "for i in range(len(features)):\n",
158 | " for j in range(len(features[0])):\n",
159 | " features[i][j] = (features[i][j] - min_val) / (max_val - min_val)"
160 | ]
161 | },
162 | {
163 | "cell_type": "markdown",
164 | "metadata": {},
165 | "source": [
166 | "One-hot encoding"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": 7,
172 | "metadata": {
173 | "collapsed": true
174 | },
175 | "outputs": [],
176 | "source": [
177 | "tmp_list = list(tmp_set)\n",
178 | "for i in range(len(labels)):\n",
179 | " labels[i] = tmp_list.index(labels[i])\n",
180 | "\n",
181 | "label_idx = np.array(labels)\n",
182 | "labels = np.zeros((len(labels), len(tmp_list)))\n",
183 | "labels[np.arange(len(labels)), label_idx] = 1"
184 | ]
185 | },
186 | {
187 | "cell_type": "markdown",
188 | "metadata": {},
189 | "source": [
190 | "split into train-test set"
191 | ]
192 | },
193 | {
194 | "cell_type": "code",
195 | "execution_count": 8,
196 | "metadata": {
197 | "collapsed": true
198 | },
199 | "outputs": [],
200 | "source": [
201 | "features_train = np.array(features[:int(train_test_ratio * len(features))])\n",
202 | "features_test = np.array(features[int(train_test_ratio * len(features)):])\n",
203 | "\n",
204 | "labels_train = labels[:int(train_test_ratio * len(labels))]\n",
205 | "labels_test = labels[int(train_test_ratio * len(labels)):]"
206 | ]
207 | },
208 | {
209 | "cell_type": "markdown",
210 | "metadata": {},
211 | "source": [
212 | "## Neural Network"
213 | ]
214 | },
215 | {
216 | "cell_type": "markdown",
217 | "metadata": {},
218 | "source": [
219 | "hyper-parameters"
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": 9,
225 | "metadata": {
226 | "collapsed": true
227 | },
228 | "outputs": [],
229 | "source": [
230 | "n_input_layers = len(features_test[0])\n",
231 | "n_hidden_layers_1 = 5\n",
232 | "n_output_layers = len(tmp_list)\n",
233 | "\n",
234 | "learning_rate = 0.01\n",
235 | "momentum = 0.9\n",
236 | "\n",
237 | "n_epochs = 100"
238 | ]
239 | },
240 | {
241 | "cell_type": "markdown",
242 | "metadata": {},
243 | "source": [
244 | "input/output placeholders"
245 | ]
246 | },
247 | {
248 | "cell_type": "code",
249 | "execution_count": 10,
250 | "metadata": {
251 | "collapsed": true
252 | },
253 | "outputs": [],
254 | "source": [
255 | "X = tf.placeholder(tf.float32, [None, n_input_layers])\n",
256 | "Y = tf.placeholder(tf.float32)"
257 | ]
258 | },
259 | {
260 | "cell_type": "markdown",
261 | "metadata": {},
262 | "source": [
263 | "Weights and biases"
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": 11,
269 | "metadata": {
270 | "collapsed": true
271 | },
272 | "outputs": [],
273 | "source": [
274 | "layer_1 = {\n",
275 | " 'weights': tf.Variable(tf.random_normal([n_input_layers, n_hidden_layers_1], stddev=0.1)),\n",
276 | " 'biases': tf.Variable(tf.random_normal([n_hidden_layers_1], stddev=0.1))\n",
277 | "}\n",
278 | "layer_op = {\n",
279 | " 'weights': tf.Variable(tf.random_normal([n_hidden_layers_1, n_output_layers], stddev=0.1)),\n",
280 | " 'biases': tf.Variable(tf.random_normal([n_output_layers], stddev=0.1))\n",
281 | "}"
282 | ]
283 | },
284 | {
285 | "cell_type": "markdown",
286 | "metadata": {},
287 | "source": [
288 | "#### Model"
289 | ]
290 | },
291 | {
292 | "cell_type": "code",
293 | "execution_count": 12,
294 | "metadata": {
295 | "collapsed": true
296 | },
297 | "outputs": [],
298 | "source": [
299 | "h_l1 = tf.nn.xw_plus_b(X, layer_1['weights'], layer_1['biases'])\n",
300 | "l1 = tf.nn.tanh(h_l1)\n",
301 | "\n",
302 | "h_l2 = tf.nn.xw_plus_b(l1, layer_op['weights'], layer_op['biases'])\n",
303 | "op = tf.nn.sigmoid(h_l2)"
304 | ]
305 | },
306 | {
307 | "cell_type": "markdown",
308 | "metadata": {},
309 | "source": [
310 | "#### Error and Optimizer"
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": 13,
316 | "metadata": {
317 | "collapsed": true
318 | },
319 | "outputs": [],
320 | "source": [
321 | "# mean-squared error\n",
322 | "err = tf.losses.mean_squared_error(predictions=op, labels=Y)\n",
323 | "\n",
324 | "# gradient-descent-with-momentum-optimizer\n",
325 | "optimizer = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(err)"
326 | ]
327 | },
328 | {
329 | "cell_type": "markdown",
330 | "metadata": {},
331 | "source": [
332 | "#### Start Session"
333 | ]
334 | },
335 | {
336 | "cell_type": "code",
337 | "execution_count": 14,
338 | "metadata": {},
339 | "outputs": [
340 | {
341 | "name": "stdout",
342 | "output_type": "stream",
343 | "text": [
344 | "*********** Train ***********\n",
345 | "Epoch: 0 Train-error: 0.238889564698 Validation-error: 0.230705\n",
346 | "Epoch: 10 Train-error: 0.175986521132 Validation-error: 0.175288\n",
347 | "Epoch: 20 Train-error: 0.112499419212 Validation-error: 0.123292\n",
348 | "Epoch: 30 Train-error: 0.101820785125 Validation-error: 0.112566\n",
349 | "Epoch: 40 Train-error: 0.0957876971224 Validation-error: 0.106434\n",
350 | "Epoch: 50 Train-error: 0.0907092668271 Validation-error: 0.10041\n",
351 | "Epoch: 60 Train-error: 0.0834965036406 Validation-error: 0.0900948\n",
352 | "Epoch: 70 Train-error: 0.0723732309804 Validation-error: 0.0751268\n",
353 | "Epoch: 80 Train-error: 0.0587168289116 Validation-error: 0.0580571\n",
354 | "Epoch: 90 Train-error: 0.0461099247953 Validation-error: 0.0434318\n",
355 | "*********** Test ***********\n",
356 | "Test-error: 0.0342728 Accuracy: 100.0\n"
357 | ]
358 | }
359 | ],
360 | "source": [
361 | "with tf.Session() as sess:\n",
362 | "\n",
363 | " tf.global_variables_initializer().run()\n",
364 | "\n",
365 | " print(\"*********** Train ***********\")\n",
366 | "\n",
367 | " # Epoch training\n",
368 | " for epoch in range(n_epochs):\n",
369 | "\n",
370 | " tr_err = []\n",
371 | "\n",
372 | " for i in range(len(features_train)):\n",
373 | " _, error = sess.run([optimizer, err], feed_dict={\n",
374 | " X: features_train[i].reshape(1, -1),\n",
375 | " Y: labels_train[i]\n",
376 | " })\n",
377 | " tr_err.append(error)\n",
378 | "\n",
379 | " if epoch % 10 == 0:\n",
380 | " # use test set for validation\n",
381 | " val_err = err.eval({X: features_test, Y: labels_test})\n",
382 | " train_err = sum(tr_err) / len(tr_err)\n",
383 | " print(\"Epoch:\", epoch, \" Train-error:\", train_err, \" Validation-error:\", val_err)\n",
384 | "\n",
385 | " print(\"*********** Test ***********\")\n",
386 | "\n",
387 | " correct = tf.equal(tf.argmax(op, 1), tf.argmax(Y, 1))\n",
388 | " accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n",
389 | " test_error = err.eval({X: features_test, Y: labels_test})\n",
390 | " test_accuracy = accuracy.eval({X: features_test, Y: labels_test}) * 100\n",
391 | " print('Test-error:', test_error, 'Accuracy:', test_accuracy)"
392 | ]
393 | }
394 | ],
395 | "metadata": {
396 | "kernelspec": {
397 | "display_name": "Python 3",
398 | "language": "python",
399 | "name": "python3"
400 | },
401 | "language_info": {
402 | "codemirror_mode": {
403 | "name": "ipython",
404 | "version": 3
405 | },
406 | "file_extension": ".py",
407 | "mimetype": "text/x-python",
408 | "name": "python",
409 | "nbconvert_exporter": "python",
410 | "pygments_lexer": "ipython3",
411 | "version": "3.6.2"
412 | }
413 | },
414 | "nbformat": 4,
415 | "nbformat_minor": 2
416 | }
417 |
--------------------------------------------------------------------------------
/Hopfield-Network/code/np_hnn_reconstruction.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 | # Util functions
7 |
8 |
9 | # function to plot the images after during testing phase
def plot_images(images, title, no_i_x, no_i_y=3, side=5):
    """Plot rows of (real, distorted, reconstructed) image triplets.

    images:  iterable of flat bipolar (+1/-1) patterns; consumed in
             groups of `no_i_y` (one column per variant).
    title:   window title for the figure.
    no_i_x:  number of triplets (rows) to display.
    no_i_y:  images per triplet (default 3: real/distorted/reconstructed).
    side:    width/height of each square image in pixels (default 5,
             matching the 5x5 letter bitmaps used by this script).
    """
    fig = plt.figure(figsize=(10, 15))
    # canvas.set_window_title was deprecated in matplotlib 3.4 and later
    # removed; the manager attribute works on both old and new versions.
    fig.canvas.manager.set_window_title(title)
    images = np.array(images).reshape(-1, side, side)
    # add a 1-pixel black (-1) border around every image
    images = np.pad(
        images, ((0, 0), (1, 1), (1, 1)), 'constant', constant_values=-1)
    for i in range(no_i_x):
        for j in range(no_i_y):
            # subplot index no_i_x * j + i + 1 lays variants out column-wise
            ax = fig.add_subplot(no_i_x, no_i_y, no_i_x * j + (i + 1))
            ax.matshow(images[no_i_x * j + i], cmap="gray")
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))

            # label only the first row of each column
            if j == 0 and i == 0:
                ax.set_title("Real")
            elif j == 0 and i == 1:
                ax.set_title("Distorted")
            elif j == 0 and i == 2:
                ax.set_title("Reconstructed")
29 |
30 |
# Dummy Data

# 5x5 bipolar (+1 = on, -1 = off) letter bitmaps, written one image row
# per line so the glyph shape is visible in the source.
perfect_data = {
    "P": [1, 1, 1, 1, -1,
          1, -1, -1, -1, 1,
          1, 1, 1, 1, -1,
          1, -1, -1, -1, -1,
          1, -1, -1, -1, -1],
    "Y": [1, -1, -1, -1, 1,
          -1, 1, -1, 1, -1,
          -1, -1, 1, -1, -1,
          -1, -1, 1, -1, -1,
          -1, -1, 1, -1, -1],
    "T": [1, 1, 1, 1, 1,
          -1, -1, 1, -1, -1,
          -1, -1, 1, -1, -1,
          -1, -1, 1, -1, -1,
          -1, -1, 1, -1, -1],
    "H": [1, -1, -1, -1, 1,
          1, -1, -1, -1, 1,
          1, 1, 1, 1, 1,
          1, -1, -1, -1, 1,
          1, -1, -1, -1, 1],
    "O": [1, 1, 1, 1, 1,
          1, -1, -1, -1, 1,
          1, -1, -1, -1, 1,
          1, -1, -1, -1, 1,
          1, 1, 1, 1, 1],
    "N": [1, -1, -1, -1, 1,
          1, 1, -1, -1, 1,
          1, -1, 1, -1, 1,
          1, -1, -1, 1, 1,
          1, -1, -1, -1, 1]
}
58 |
# Pre-Process Data

# Data parameters.
#
# A Hopfield network reliably stores roughly 0.138 * n_neurons patterns:
# 0.138 * 25 = 3.45, so train on 3 letters.
n_train = 3

# number of noisy test patterns to generate
n_test = 100

# number of image triplets shown in the output plot
n_train_disp = 10

# probability of flipping each pixel (0 < distort < 1)
distort = 0.1

# width of each (square) image in pixels
n_side = 5

# one neuron per pixel
n_neurons = n_side * n_side
80 |
# Keep the first n_train letters as the training set.
train_data = [np.array(pattern) for pattern in perfect_data.values()][:n_train]

# Build the test set: each sample pairs a clean training pattern with a
# copy whose pixels are flipped independently with probability `distort`.
test_data = []
for _ in range(n_test):
    clean = np.array(train_data[np.random.randint(0, n_train)])
    flip_mask = np.where(np.random.random(clean.shape) > distort, 1, -1)
    test_data.append((clean, clean * flip_mask))
92 |
93 | # Neural Network
94 |
95 |
96 | # Function to train the network using Hebbian learning rule
def train(neu, training_data):
    """Build a Hopfield weight matrix with the Hebbian learning rule.

    neu:            number of neurons (length of each pattern).
    training_data:  iterable of bipolar (+1/-1) patterns of length `neu`.

    Returns the (neu, neu) symmetric weight matrix with a zero diagonal
    (a neuron never feeds back onto itself).
    """
    w = np.zeros([neu, neu])
    for data in training_data:
        w += np.outer(data, data)
    # zero the self-connections in one vectorized call
    np.fill_diagonal(w, 0)
    return w
104 |
105 |
106 | # Function to test the network
def test(weights, testing_data):
    """Reconstruct every noisy pattern and score exact-match accuracy.

    weights:       trained Hopfield weight matrix.
    testing_data:  iterable of (clean, noisy) pattern pairs.

    Returns (accuracy, rows): accuracy is the fraction of patterns
    recovered exactly; rows holds [clean, noisy, reconstructed]
    triplets for plotting.
    """
    n_correct = 0.0
    rows = []

    for clean, noisy in testing_data:
        recovered = retrieve_pattern(weights, noisy)
        if np.array_equal(clean, recovered):
            n_correct += 1.0
        rows.append([clean, noisy, recovered])

    return (n_correct / len(testing_data)), rows
121 |
122 |
123 | # Function to retrieve individual noisy patterns
def retrieve_pattern(weights, data, steps=10):
    """Run asynchronous Hopfield updates to settle `data` into an attractor.

    weights:  (n, n) weight matrix.
    data:     bipolar (+1/-1) input pattern of length n.
    steps:    maximum number of full update sweeps (default 10).

    Returns the recovered pattern. Neurons are updated sequentially, so
    each update already sees the new values of earlier neurons in the
    same sweep (asynchronous dynamics).
    """
    res = np.array(data)

    for _ in range(steps):
        prev = res.copy()
        for i in range(len(res)):
            # threshold the weighted input; a tie (exactly 0) maps to -1,
            # matching the original update rule
            res[i] = 1 if np.dot(weights[i], res) > 0 else -1
        # a sweep with no change means a fixed point was reached; further
        # sweeps cannot alter the state, so stop early
        if np.array_equal(res, prev):
            break
    return res
135 |
136 |
# Learn the weight matrix from the clean training patterns.
weights = train(n_neurons, train_data)

# Reconstruct every noisy test pattern and measure exact-recovery accuracy.
accuracy, result_imgs = test(weights, test_data)

print("Accuracy of the network is %f" % (accuracy * 100))

# Show real / distorted / reconstructed triplets side by side.
plot_images(result_imgs, "Reconstructed Data", n_train_disp)
plt.show()
149 |
--------------------------------------------------------------------------------
/Hopfield-Network/np_hnn_reconstruction.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Hopfield Network\n",
8 | "###### data reconstruction"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "Import dependencies"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 1,
21 | "metadata": {
22 | "collapsed": true
23 | },
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import matplotlib.pyplot as plt\n",
28 | "%matplotlib inline"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {},
34 | "source": [
35 | "### Util functions"
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {},
41 | "source": [
42 | "function to plot the images after during testing phase"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": 2,
48 | "metadata": {
49 | "collapsed": true
50 | },
51 | "outputs": [],
52 | "source": [
53 | "def plot_images(images, title, no_i_x, no_i_y=3):\n",
54 | " fig = plt.figure(figsize=(10, 15))\n",
55 | " fig.canvas.set_window_title(title)\n",
56 | " images = np.array(images).reshape(-1, 5, 5)\n",
57 | " images = np.pad(images, ((0, 0), (1, 1), (1, 1)), 'constant', constant_values=-1)\n",
58 | " for i in range(no_i_x):\n",
59 | " for j in range(no_i_y):\n",
60 | " ax = fig.add_subplot(no_i_x, no_i_y, no_i_x * j + (i + 1))\n",
61 | " ax.matshow(images[no_i_x * j + i], cmap=\"gray\")\n",
62 | " plt.xticks(np.array([]))\n",
63 | " plt.yticks(np.array([]))\n",
64 | "\n",
65 | " if j == 0 and i == 0:\n",
66 | " ax.set_title(\"Real\")\n",
67 | " elif j == 0 and i == 1:\n",
68 | " ax.set_title(\"Distorted\")\n",
69 | " elif j == 0 and i == 2:\n",
70 | " ax.set_title(\"Reconstructed\")"
71 | ]
72 | },
73 | {
74 | "cell_type": "markdown",
75 | "metadata": {},
76 | "source": [
77 | "#### Dummy Data"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": 3,
83 | "metadata": {
84 | "collapsed": true
85 | },
86 | "outputs": [],
87 | "source": [
88 | "perfect_data = {\n",
89 | " \"P\": [1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1],\n",
90 | " \"Y\": [1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1],\n",
91 | " \"T\": [1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1],\n",
92 | " \"H\": [1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1],\n",
93 | " \"O\": [1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1],\n",
94 | " \"N\": [1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1]\n",
95 | "}"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {},
101 | "source": [
102 | "### Pre-Process Data"
103 | ]
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "metadata": {},
108 | "source": [
109 | "##### Data Parameters"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {},
115 | "source": [
116 | "Hopfield networks can hold about 0.138 \\* n_neurons for better denoising
\n",
117 | "0.138 \\* n_neurons = 0.138 \\* 25 = 3.45 ~ 3
"
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": 4,
123 | "metadata": {
124 | "collapsed": true
125 | },
126 | "outputs": [],
127 | "source": [
128 | "n_train = 3"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": 5,
134 | "metadata": {
135 | "collapsed": true
136 | },
137 | "outputs": [],
138 | "source": [
139 | "n_test = 100"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": 6,
145 | "metadata": {
146 | "collapsed": true
147 | },
148 | "outputs": [],
149 | "source": [
150 | "# no of images to show in output plot\n",
151 | "n_train_disp = 10\n",
152 | "\n",
153 | "# Amount of distortion (0 < distort < 1)\n",
154 | "distort = 0.1\n",
155 | "\n",
156 | "# Size of image(width)\n",
157 | "n_side = 5\n",
158 | "\n",
159 | "# No of neurons\n",
160 | "n_neurons = n_side * n_side"
161 | ]
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": 7,
166 | "metadata": {
167 | "collapsed": true
168 | },
169 | "outputs": [],
170 | "source": [
171 | "train_data = [np.array(d) for d in perfect_data.values()][:n_train]"
172 | ]
173 | },
174 | {
175 | "cell_type": "markdown",
176 | "metadata": {},
177 | "source": [
178 | "Generate test data by adding noise to train data"
179 | ]
180 | },
181 | {
182 | "cell_type": "code",
183 | "execution_count": 8,
184 | "metadata": {
185 | "collapsed": true
186 | },
187 | "outputs": [],
188 | "source": [
189 | "test_data = []\n",
190 | "for d in range(n_test):\n",
191 | " r_i = np.random.randint(0, n_train)\n",
192 | " base_pattern = np.array(train_data[r_i])\n",
193 | " noise = 1 * (np.random.random(base_pattern.shape) > distort)\n",
194 | " np.place(noise, noise == 0, -1)\n",
195 | " noisy_pattern = np.multiply(base_pattern, noise)\n",
196 | " test_data.append((base_pattern, noisy_pattern))"
197 | ]
198 | },
199 | {
200 | "cell_type": "markdown",
201 | "metadata": {},
202 | "source": [
203 | "### Neural Network"
204 | ]
205 | },
206 | {
207 | "cell_type": "markdown",
208 | "metadata": {},
209 | "source": [
210 | "Function to train the network using Hebbian learning rule"
211 | ]
212 | },
213 | {
214 | "cell_type": "code",
215 | "execution_count": 9,
216 | "metadata": {
217 | "collapsed": true
218 | },
219 | "outputs": [],
220 | "source": [
221 | "def train(neu, training_data):\n",
222 | " w = np.zeros([neu, neu])\n",
223 | " for data in training_data:\n",
224 | " w += np.outer(data, data)\n",
225 | " for diag in range(neu):\n",
226 | " w[diag][diag] = 0\n",
227 | " return w"
228 | ]
229 | },
230 | {
231 | "cell_type": "markdown",
232 | "metadata": {},
233 | "source": [
234 | "Function to test the network"
235 | ]
236 | },
237 | {
238 | "cell_type": "code",
239 | "execution_count": 10,
240 | "metadata": {
241 | "collapsed": true
242 | },
243 | "outputs": [],
244 | "source": [
245 | "def test(weights, testing_data):\n",
246 | " success = 0.0\n",
247 | "\n",
248 | " output_data = []\n",
249 | "\n",
250 | " for data in testing_data:\n",
251 | " true_data = data[0]\n",
252 | " noisy_data = data[1]\n",
253 | " predicted_data = retrieve_pattern(weights, noisy_data)\n",
254 | " if np.array_equal(true_data, predicted_data):\n",
255 | " success += 1.0\n",
256 | " output_data.append([true_data, noisy_data, predicted_data])\n",
257 | "\n",
258 | " return (success / len(testing_data)), output_data"
259 | ]
260 | },
261 | {
262 | "cell_type": "markdown",
263 | "metadata": {},
264 | "source": [
265 | "Function to retrieve individual noisy patterns"
266 | ]
267 | },
268 | {
269 | "cell_type": "code",
270 | "execution_count": 11,
271 | "metadata": {
272 | "collapsed": true
273 | },
274 | "outputs": [],
275 | "source": [
276 | "def retrieve_pattern(weights, data, steps=10):\n",
277 | " res = np.array(data)\n",
278 | "\n",
279 | " for _ in range(steps):\n",
280 | " for i in range(len(res)):\n",
281 | " raw_v = np.dot(weights[i], res)\n",
282 | " if raw_v > 0:\n",
283 | " res[i] = 1\n",
284 | " else:\n",
285 | " res[i] = -1\n",
286 | " return res"
287 | ]
288 | },
289 | {
290 | "cell_type": "markdown",
291 | "metadata": {},
292 | "source": [
293 | "#### Train"
294 | ]
295 | },
296 | {
297 | "cell_type": "code",
298 | "execution_count": 12,
299 | "metadata": {
300 | "collapsed": true
301 | },
302 | "outputs": [],
303 | "source": [
304 | "W = train(n_neurons, train_data)"
305 | ]
306 | },
307 | {
308 | "cell_type": "markdown",
309 | "metadata": {},
310 | "source": [
311 | "#### Test"
312 | ]
313 | },
314 | {
315 | "cell_type": "code",
316 | "execution_count": 13,
317 | "metadata": {
318 | "collapsed": true
319 | },
320 | "outputs": [],
321 | "source": [
322 | "accuracy, op_imgs = test(W, test_data)"
323 | ]
324 | },
325 | {
326 | "cell_type": "markdown",
327 | "metadata": {},
328 | "source": [
329 | "Print accuracy"
330 | ]
331 | },
332 | {
333 | "cell_type": "code",
334 | "execution_count": 14,
335 | "metadata": {},
336 | "outputs": [
337 | {
338 | "name": "stdout",
339 | "output_type": "stream",
340 | "text": [
341 | "Accuracy of the network is 97.000000\n"
342 | ]
343 | }
344 | ],
345 | "source": [
346 | "print(\"Accuracy of the network is %f\" % (accuracy * 100))"
347 | ]
348 | },
349 | {
350 | "cell_type": "markdown",
351 | "metadata": {},
352 | "source": [
353 | "#### Plot test result"
354 | ]
355 | },
356 | {
357 | "cell_type": "code",
358 | "execution_count": 15,
359 | "metadata": {},
360 | "outputs": [
361 | {
362 | "data": {
363 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAekAAANUCAYAAACXBdUVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAGyhJREFUeJzt3W2srWld3/H/fzyIo46OKLadwTkWH9CqLU1KbVraEtHQ\nIqN9oSI+VIkP8UXb2PpAtUWgilJraqOmqVaUCIraEA0oPkAi+ECNqQY0mJqgcDiCKAcYHQGVytUX\n933C2mevmXOv7Vrn/u29Pp9kwsxZ66x97cP6r+++7rP3tXqMUQBAntvWXgAAsJ1IA0AokQaAUCIN\nAKFEGgBCiTQAhLrQke7uV3T3l6+9Dtimu/9Hdz997XUs0d3P7O4XrL0OuCi6+3Hd/fs3u19EpLv7\nDd397u7+0+5+S3c/r7s/eO11wV/FxvP6/u6+r7tf1d1f1d23VVWNMb5qjPHNCx/n0/ewlr/SY0DV\n+Xm9PvQXlvPn/S2HevzrIiI9u3eM8cFV9eiq+rtV9Q0rrwf24d4xxh1VdbmqnlNVT6uq596qD97d\nl27Vx+KonPvX654kNXCruAWOMd5SVT9X0//51d0P7e7v6O43dvcfzpcIb59v+7Du/qnufmt3v2P+\n90esuX7YZozxx2OMF1fVk6vqS7r7kze/Eu/uj5ifv/d199u7+5e6+7bufn5V3VNVL5l3Ll8/3/+z\nuvu18/1f0d2feP1jzTudp3X3b1bVO7v7hQ/wGP9g3t3f192v6e7HbTzG3+zuV85XAV5WVR9xi/6o\nOEd2eb2eb//s7n51d/9Jd/9ud/+z+dfv6u4Xz8/913X3V2z8nmd294939w/Nz8fXdvff27j9ad39\npvm23+nux8+P+41V9eT5Of+a+b6v6O5nd/evVNW7quqRN15lunEH3t2P3ZiTq939pd39lVX1hVX1\n9fPjv2Tj83jR3KTXd/e/2Xic2+eZf0d3/3ZVPWbJn3FcpOfI/vOqet38S8+pqo+v6UnwsVV1d1V9\n03zbbVX1gzXtUu6pqndX1ffcyvXCLsYYv1ZVv19V//iGm75m/vWHV9Vfq+kFZowxvriq3ljzzmWM\n8e3d/fFV9cKq+ur5/i+tKcDvv/F4T6mqz6yqO8cYT9nyGHdX1U9X1bdU1cOq6mur6kXd/fD59/9I\nVf16TXH+5qr6kn3+OXAx7PJ63d1/v6p+qKq+rqrurKp/UlVvmH/fj9b0/L+rqj6nqr61uz9t40N9\n1nyfO6vqxTW/znf3o6rqX1XVY+YrVk+oqjeMMX62qr61qn5sfs7/nY3H+uKq+sqquqOqrtzk87tc\nVT9TVd9d06w9uqpePcb4vqr64ar69vnx75135S+pqtfMn/fjq+qru/sJ88M9o6o+Zv7nCbVwppIi\n/ZPdfX9VXa2qP6qqZ3R31/SH+W/HGG8fY9xf0x/851dVjTHeNsZ40RjjXfNtz66qf7rS+mGpN9cU\nxk3vqaq/UVWXxxjvGWP80njgg/WfXFU/PcZ42RjjPVX1HVV1e1X9w437fNcY4+oY490P8BhfVFUv\nHWO8dIzx3jHGy6rq/1TVE7v7npq+yn/6GOPPxxi/WNOLD1y38+t1VX1ZVf3A/Lx97xjjTWOM/9vd\nH1VV/6iqnjbG+LMxxqur6vur6l9ufLxfnp+rf1lVz6+q69H9y6p6aFX9re5+yBjjDWOM373J2p83\nxnjtGOP/zfPzYL6gql4+xnjhPJdvm9e3zWOq6uFjjP80xviLMcbvVdX/3Pj8P6+qnj3/2Vytqu+6\nyceuqqxI/4v5K6HHVdUn1PQV/MOr6gOr6tfnSw33VdXPzr9e3f2B3f293X2lu/+kqn6xqu7s7vdb\n5TOAZe6uqrff8Gv/pabdyM939+91979/kN9/V23sAMYY763pxfLujftcvckaLlfV516fq3m2HlvT\nFwp3VdU7xhjv3Lj/g+44ODo7v15X1U
dV1baA3lVV16N+3ZU6+Xx+y8a/v6uqPqC7L40xXlfTFaVn\nVtUfdfePdvddN1n7zWZj0wOteZvLVXXXDTP1jTVdGauaPs/Nj71oppIiXVVVY4xXVtXzatodXKvp\nEvYnjTHunP/50PkbFqqmS4SPqqpPHWN8SE2XT6qq+hYvGxbp7sfU9OLzy5u/Psa4f4zxNWOMR9Z0\nae/fdffjr998w8O8uaYXhOuP2TW9mLxp8yFv+D03/vfVqnr+xlzdOcb4oDHGc6rqD6rqw7r7gzbu\nf8/yz5JjsePr9dWaLvXe6M1V9bDuvmPj1+6pk8/nB1vDj4wxHlvTTIyq+s/Xb3qg33LDf7+zpi8u\nrvvrG//+QGve9jhXq+r1N8zUHWOMJ863/0FNc3rdopmKi/Tsv1XVZ1TVp9R0ueA7u/sjq6q6++6N\na/x31PSkuK+7H1bTNX+I090f0t1Pqunv1V4wxvitG25/Und/7BzcP67pMt5755v/sKoeuXH3H6+q\nz5y/QeYhNX2x+udV9aoHWcKNj/GCqrq3u5/Q3e/X3R/Q089tPmKMcaWmS9/P6u737+7HVtW9Z/7k\nueiWvl4/t6qeOj9vb5tv+4T50u+rqurb5ufh367p0vhNf3yqux/V3Z/W3Q+tqj+rqQebc/PRffPv\n4H51VX1+dz9k/oa0z9m47Yer6tO7+/O6+1J3f3h3P3rj8Tdn6teq6v75G9lun+fqk+cvzKumuf2G\nnr7h+RFV9a9v9vlVhUZ6jPHWmr7B4Jtq+pGV11XVr86XtF9e0+65anpy3F7TV3C/WtOlFUjyko2/\nu/sPVfVfq+qpW+73cTU9t/+0qv53Vf33McYvzLd9W1X9x/kS2teOMX6npr9T/u6anvv31vRNYX/x\nIOu48TGuVtVn13Q57q3z+r6u3vea8AVV9ak1XZZ/Rk3zCKcsfb2ev2nyqVX1nTV9IfrKet8VoadU\n1UfXtKv+iap6xhjj5Qs+/ENr+ma1azVdEv/Iet+Pg/2v+X/f1t2/8SCP8fSadsvvqKpn1fRNk9c/\ntzdW1RNr+kL47TUF/frfhz+3pr8Lv6+7f3L++/In1fTNZa+f1/T9VfWh8/2fVdMl7tdX1c/X9Hfr\nN9UP/L0pAMCaInfSAIBIA0AskQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQaqc3hO9uJ59sMcZwVviR\nMhPbmYnjZSa2O+tM2EkDQCiRBoBQIg0AoUQaAEKJNACEEmkACCXSABBKpAEg1E6HmSw1xsX4WfZu\n5zGwH/ueiaXPzbU+LtyMTixjJw0AoUQaAEKJNACEEmkACCXSABBKpAEglEgDQCiRBoBQIg0AoQ5y\n4ti+T2BZejKN05BIZSbgJDOxjJ00AIQSaQAIJdIAEEqkASCUSANAKJEGgFAiDQChRBoAQok0AIQS\naQAIJdIAEEqkASCUSANAKJEGgFAiDQChRBoAQok0AIQSaQAIJdIAEOrS2gsA1jfGWHS/7j7wSoBN\ndtIAEEqkASCUSANAKJEGgFAiDQChRBoAQok0AIQSaQAIJdIAEMqJY4CTxCCUnTQAhBJpAAgl0gAQ\nSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhDoXJ445DQlOMhNw0kWdCTtpAAgl0gAQSqQBIJRIA0Ao\nkQaAUCINAKFEGgBCiTQAhBJpAAi164lj16rqyiEWco5dXnsBrMpMnGYmjpuZOO3MM9FjjH0uBADY\nE5e7ASCUSANAKJEGgFAiDQChRBoAQok0AIQSaQAIJdIAEEqkASCUSANAKJEGgFAiDQChdnoXrO72\nbhxbjDF67TWwDjOxnZk4XmZiu7POhJ00AIQSaQAIJdIAEEqkASCUSANAKJEGgFAiDQChRBoAQu10\nmMlSY1yMn2Xvdh4D+5E+E57r3GrpM7HUoWfHThoAQok0AIQSaQAIJdIAEEqkASCUSANAKJEGgFAi\nDQ
ChRBoAQh3kxLF9n8Cy9GQapyaRaq2Z2PfjmTH2RSeWsZMGgFAiDQChRBoAQok0AIQSaQAIJdIA\nEEqkASCUSANAKJEGgFAHOXEMOJt9nyR23k5XAk6ykwaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQB\nIJRIA0AokQaAUE4cgyBLTwjb98lkSx/PCWZwa9lJA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQ\nSqQBIJRIA0AoJ44BThKDUHbSABBKpAEglEgDQCiRBoBQIg0AoUQaAEKJNACEEmkACCXSABDqXJw4\n5jQkOMlMwEkXdSbspAEglEgDQCiRBoBQIg0AoUQaAEKJNACEEmkACCXSABBKpAEg1K4njl2rqiuH\nWMg5dnntBbAqM3GamThuZuK0M89EjzH2uRAAYE9c7gaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQB\nIJRIA0AokQaAUCINAKFEGgBCiTQAhNrpXbC627txbDHG6LXXwDrMxHZm4niZie3OOhN20gAQSqQB\nIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAi102EmS41xMX6Wvdt5DOzHvmdi6XNzrY8LN6MTy9hJ\nA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0Cog5w4tu8TWJaeTOM0JFKZCTjJTCxj\nJw0AoUQaAEKJNACEEmkACCXSABBKpAEglEgDQCiRBoBQIg0AoQ5y4hhwNktPTQKOg500AIQSaQAI\nJdIAEEqkASCUSANAKJEGgFAiDQChRBoAQok0AIRy4hgE6e5F93MyGRwHO2kACCXSABBKpAEglEgD\nQCiRBoBQIg0AoUQaAEKJNACEEmkACOXEMTiHlp5MBpxvdtIAEEqkASCUSANAKJEGgFAiDQChRBoA\nQok0AIQSaQAIJdIAEOpcnDjmdCUAHsxF7YSdNACEEmkACCXSABBKpAEglEgDQCiRBoBQIg0AoUQa\nAEKJNACE2vXEsWtVdeUQCznHLq+9AFZlJk4zE8fNTJx25pnoMcY+FwIA7InL3QAQSqQBIJRIA0Ao\nkQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUDu9C1Z3ezeOLcYYvfYaWIeZ2M5M\nHC8zsd1ZZ8JOGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKF2OsxkqTGyf5a92zkL3Fr7\nnomlz+GlH9dMcKvpxDJ20gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQ6iAnji21\n1okuTmEi1VoniZkJUh17J+ykASCUSANAKJEGgFAiDQChRBoAQok0AIQSaQAIJdIAEEqkASDUqieO\nLT3RZd+cmkQqMwEnHftM2EkDQCiRBoBQIg0AoUQaAEKJNACEEmkACCXSABBKpAEglEgDQKhVTxxL\nOdEFUpgJOOnYZ8JOGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl\n0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAh16RAP2t2HeFg4t8wEnGQmlrGTBoBQIg0A\noUQaAEKJNACEEmkACCXSABBKpAEglEgDQCiRBoBQu544dq2qrhxiIefY5bUXwKrMxGlm4riZidPO\nPBM9xtjnQgCAPXG5GwBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl\n0gAQaqd3wepu78axxRij114D6zAT25mJ42UmtjvrTNhJA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl\n0gAQSqQBINROh5ksNcayn2XvXue8g/T1cfHs+zmX/nhwM+nPuZT12UkDQCiRBoBQIg0AoUQaAEKJ\nNACEEmkACCXSABBKpAEglEgDQKiDnDjm1CQ4yUzASWZiGTtpAAgl
0gAQSqQBIJRIA0AokQaAUCIN\nAKFEGgBCiTQAhBJpAAh1kBPHlrqoJ8TAWZkJOOnYZ8JOGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0Ao\nkQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAh1ae0F\nAIczxlh0v+4+8EqAs7CTBoBQIg0AoUQaAEKJNACEEmkACCXSABBKpAEglEgDQCiRBoBQ5+LEMach\nwUlmAk66qDNhJw0AoUQaAEKJNACEEmkACCXSABBKpAEglEgDQCiRBoBQIg0AoXY9cexaVV05xELO\nsctrL4BVmYnTzMRxMxOnnXkmeoyxz4UAAHvicjcAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFE\nGgBCiTQAhBJpAAgl0gAQSqQBINRO74LV3d6NY4sxRq+9BtZhJrYzE8fLTGx31pmwkwaAUCINAKFE\nGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0ConQ4zWWqMZT/L3r3OeQfp6+PiSX/Opa+Piyf9OZeyPjtp\nAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAh1kBPHlp7Asu8TXVJOiIEb7fs5vG9m\ngltNJ5axkwaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUAc5cWypi3pCDJyVmYCT\njn0m7KQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBC\niTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaA\nUCINAKFEGgBCXVp7AUt099pLgChmAk66qDNhJw0AoUQaAEKJNACEEmkACCXSABBKpAEglEgDQCiR\nBoBQIg0AoXY9cexaVV05xELOsctrL4BVmYnTzMRxMxOnnXkmeoyxz4UAAHvicjcAhBJpAAgl0gAQ\nSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQaqc32OhuB31vMcbotdfAOszEdmbieJmJ\n7c46E3bSABBKpAEglEgDQCiRBoBQIg0AoUQaAEKJNACEEmkACCXSABBqpxPHlhoj+8CZbochcWut\nNROe66TSiWXspAEglEgDQCiRBoBQIg0AoUQaAEKJNACEEmkACCXSABBKpAEg1EFOHFtqrRNdlp50\nk3LiDOef5xKczbF3wk4aAEKJNACEEmkACCXSABBKpAEglEgDQCiRBoBQIg0AoUQaAEKteuLY0hNd\n9s3pT6QyE3DSsc+EnTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhFr1xLGUE13g\n0JaemmQm4KRjnwk7aQAIJdIAEEqkASCUSANAKJEGgFAiDQChRBoAQok0AIQSaQAIJdIAEEqkASCU\nSANAKJEGgFAiDQChRBoAQok0AIQSaQAIJdIAEEqkASDUpUM8aHcf4mHh3DITcJKZWMZOGgBCiTQA\nhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBC7Xri2LWqunKIhZxjl9deAKsyE6eZieNmJk47\n80z0GGOfCwEA9sTlbgAIJdIAEEqkASCUSANAKJEGgFAiDQChRBoAQok0AIQSaQAIJdIAEEqkASDU\nTm+w0d0O+t5ijNFrr4F1mIntzMTxMhPbnXUm7KQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl\n0gAQaqfDTJYa42L8LHu38xjYDzMBJ5mJZeykASCUSANAKJEGgFAiDQChRBoAQok0AIQSaQAIJdIA\nEEqkASDUQU4c2/cJLEtPpnEaEqnMBJxkJpaxkwaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRI\nA0AokQaAUAc5cQw4rPN2ahJw
NnbSABBKpAEglEgDQCiRBoBQIg0AoUQaAEKJNACEEmkACCXSABDK\niWNwDo0xFt3PyWRwvtlJA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCIN\nAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKEurb2AJbp77SVAFDMBJ13UmbCTBoBQ\nIg0AoUQaAEKJNACEEmkACCXSABBKpAEglEgDQCiRBoBQu544dq2qrhxiIefY5bUXwKrMxGlm4riZ\nidPOPBM9xtjnQgCAPXG5GwBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJp\nAAgl0gAQaqd3wepu78axxRij114D6zAT25mJ42UmtjvrTNhJA0AokQaAUCINAKFEGgBCiTQAhBJp\nAAgl0gAQSqQBINROh5ksNcbF+Fn2bucxsB9mAk4yE8vYSQNAKJEGgFAiDQChRBoAQok0AIQSaQAI\nJdIAEEqkASCUSANAqIOcOLbvE1iWnkzjNCRSmQk4yUwsYycNAKFEGgBCiTQAhBJpAAgl0gAQSqQB\nIJRIA0AokQaAUCINAKEOcuIYcFjn7dQk4GzspAEglEgDQCiRBoBQIg0AoUQaAEKJNACEEmkACCXS\nABBKpAEglBPH4BwaYyy6n5PJ4HyzkwaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaA\nUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCXVp7AUt099pLgChmAk66\nqDNhJw0AoUQaAEKJNACEEmkACCXSABBKpAEglEgDQCiRBoBQIg0AoXY9cexaVV05xELOsctrL4BV\nmYnTzMRxMxOnnXkmeoyxz4UAAHvicjcAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQA\nhBJpAAgl0gAQSqQBINRO74LV3d6NY4sxRq+9BtZhJrYzE8fLTGx31pmwkwaAUCINAKFEGgBCiTQA\nhBJpAAgl0gAQSqQBIJRIA0ConQ4zWWqMi/Gz7N3OY2A/zAScZCaWsZMGgFAiDQChRBoAQok0AIQS\naQAIJdIAEEqkASCUSANAKJEGgFAHOXFs3yewLD2ZxmlIpDITcJKZWMZOGgBCiTQAhBJpAAgl0gAQ\nSqQBIJRIA0AokQaAUCINAKFEGgBCiTQAhBJpAAgl0gAQSqQBIJRIA0AokQaAUCINAKFEGgBCiTQA\nhBJpAAh1ae0FAO8zxljl8bp7rx8X2A87aQAIJdIAEEqkASCUSANAKJEGgFAiDQChRBoAQok0AIQS\naQAI5cQxCLLvk7/2fYIZcGvZSQNAKJEGgFAiDQChRBoAQok0AIQSaQAIJdIAEEqkASCUSANAqHNx\n4ti+T2GCY2F2OBYX9bluJw0AoUQaAEKJNACEEmkACCXSABBKpAEglEgDQCiRBoBQIg0AoXY9cexa\nVV05xELOsctrL4BVmYnTzMRxMxOnnXkmeoyxz4UAAHvicjcAhBJpAAgl0gAQSqQBIJRIA0AokQaA\nUCINAKFEGgBCiTQAhPr/KFbp6r/2rhMAAAAASUVORK5CYII=\n",
364 | "text/plain": [
365 | ""
366 | ]
367 | },
368 | "metadata": {},
369 | "output_type": "display_data"
370 | }
371 | ],
372 | "source": [
373 | "plot_images(op_imgs, \"Reconstructed Data\", n_train_disp)\n",
374 | "plt.show()"
375 | ]
376 | }
377 | ],
378 | "metadata": {
379 | "kernelspec": {
380 | "display_name": "Python 3",
381 | "language": "python",
382 | "name": "python3"
383 | },
384 | "language_info": {
385 | "codemirror_mode": {
386 | "name": "ipython",
387 | "version": 3
388 | },
389 | "file_extension": ".py",
390 | "mimetype": "text/x-python",
391 | "name": "python",
392 | "nbconvert_exporter": "python",
393 | "pygments_lexer": "ipython3",
394 | "version": "3.6.0"
395 | }
396 | },
397 | "nbformat": 4,
398 | "nbformat_minor": 2
399 | }
400 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Hello NN
2 |
My Neural Network Codes from when I was a beginner in Artificial Neural Networks
4 |
5 | Clean, Commented, Single-Filed Python Programs + Jupyter Notebooks
6 |
7 | ## Prerequisites
8 |
The programs depend upon the following dependencies:
10 |
11 | * Python 3.5+
12 | * Numpy
13 | * Matplotlib
14 | * Tensorflow
15 |
16 | ## Index
17 |
18 |
19 | * FeedForward Neural Network
20 | * Classification `Iris dataset`
21 | * numpy - [code](Feedforward-NeuralNet/code/np_nn_iris_classification.py), [notebook](Feedforward-NeuralNet/np_nn_iris_classification.ipynb)
22 | * tensorflow - [code](Feedforward-NeuralNet/code/tf_nn_iris_classification.py), [notebook](Feedforward-NeuralNet/tf_nn_iris_classification.ipynb)
23 |
24 | * Recurrent Neural Network
25 | * Addition `8-bit numbers`
26 | * numpy - [code](Recurrent-NeuralNet/code/np_rnn_addition.py), [notebook](Recurrent-NeuralNet/np_rnn_addition.ipynb)
27 | * tensorflow - [code](Recurrent-NeuralNet/code/tf_rnn_addition.py), [notebook](Recurrent-NeuralNet/tf_rnn_addition.ipynb)
28 | * Time-series `CO2(ppm) mauna loa, 1965-1980`
29 | * tensorflow - [code](Recurrent-NeuralNet/code/tf_lstm_climate_timeseries.py), [notebook](Recurrent-NeuralNet/tf_lstm_climate_timeseries.ipynb)
30 |
31 | * Convolutional Neural Network
32 | * Classification `MNIST`
    * tensorflow - [code](Convolutional-NeuralNet/code/tf_cnn_mnist_classification.py), [notebook](Convolutional-NeuralNet/tf_cnn_mnist_classification.ipynb)
34 |
35 | * Hopfield Network
36 | * Data Reconstruction
37 | * numpy - [code](Hopfield-Network/code/np_hnn_reconstruction.py), [notebook](Hopfield-Network/np_hnn_reconstruction.ipynb)
38 |
39 | * Restricted Boltzmann Machine
40 | * Image Reconstruction `MNIST`
41 | * tensorflow - [code](Restricted-Boltzmann-Machine/code/tf_rbm_mnist_reconstruction.py), [notebook](Restricted-Boltzmann-Machine/tf_rbm_mnist_reconstruction.ipynb)
42 |
43 | * Denoising Neural AutoEncoder
44 | * Image Denoising `MNIST`
45 | * tensorflow - [code](Denoising-Autoencoder/code/tf_dae_mnist_reconstruction.py), [notebook](Denoising-Autoencoder/tf_dae_mnist_reconstruction.ipynb)
46 |
47 | * Deconvolutional Neural AutoEncoder
48 | * Image Reconstruction `MNIST`
49 | * tensorflow - [code](Deconvolutional-Autoencoder/code/tf_dcae_mnist_reconstruction.py), [notebook](Deconvolutional-Autoencoder/tf_dcae_mnist_reconstruction.ipynb)
50 |
--------------------------------------------------------------------------------
/Recurrent-NeuralNet/code/np_rnn_addition.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import numpy as np
4 |
# Pre-process data

# Seed the RNG so every run generates the identical dataset.
np.random.seed(1)

total_test_cases = 100
train_test_ratio = 0.80

# Generate addition problems: two random 7-bit operands and their sum
# (both operands < 128, so the sum always fits in 8 bits).
operand_pairs = []
sums = []
for _ in range(total_test_cases):
    a = np.random.randint(0, 128)
    b = np.random.randint(0, 128)
    operand_pairs.append([a, b])
    sums.append(a + b)

# Expand every number into its 8-bit binary representation (one row each).
features = np.array(operand_pairs, dtype=np.uint8).reshape(-1, 1)
labels = np.array(sums, dtype=np.uint8).reshape(-1, 1)
features = np.unpackbits(features, axis=1)
labels = np.unpackbits(labels, axis=1)

# Re-group the flat bit rows into [operand_a_bits, operand_b_bits] pairs.
tmp_list = [[features[2 * i], features[2 * i + 1]] for i in range(len(labels))]
features = np.array(tmp_list)

# Split into train and test sets.
n_train_samples = int(train_test_ratio * len(features))
features_train = np.array(features[:n_train_samples])
features_test = np.array(features[n_train_samples:])

labels_train = labels[:n_train_samples]
labels_test = labels[n_train_samples:]
42 |
43 | # Neural Network
44 |
# Hyper-parameters: two input bits per timestep (one from each operand),
# 16 recurrent hidden units, one output bit, and 8-bit sequences.
n_input_layers = 2
n_hidden_layers = 16
n_output_layers = 1
n_sequence = 8

# per-sample update step size
learning_rate = 1

# number of passes over the training set
n_epochs = 100
54 |
# Activation functions and their derivatives.  Each derivative is written
# as a function of the activation's OUTPUT (e.g. sigmoid' = y * (1 - y)),
# which is the value backprop already has on hand.
activation_f = {
    'identity': lambda z: z,
    'sigmoid': lambda z: 1.0 / (1.0 + np.exp(-z)),
    'tanh': lambda z: np.tanh(z),
    'arctan': lambda z: np.arctan(z),
    'relu': lambda z: z * (z > 0),
    'softplus': lambda z: np.log(1 + np.exp(z)),
    'sinusoid': lambda z: np.sin(z),
    'gaussian': lambda z: np.exp(-z * z)
}
activation_f_prime = {
    'identity': lambda y: 1,
    'sigmoid': lambda y: y * (1.0 - y),
    'tanh': lambda y: 1.0 - y**2,
    'arctan': lambda y: 1.0 / (1.0 + np.tan(y)**2),
    'relu': lambda y: 1.0 * (y > 0),
    'softplus': lambda y: 1.0 - np.exp(-y),
    'sinusoid': lambda y: np.cos(np.arcsin(y)),
    'gaussian': lambda y: -2 * y * np.sqrt(-np.log(y))
}
76 |
# Activation function parameters (f1: hidden layer, f2: output layer)
f1 = 'sigmoid'
f2 = 'sigmoid'

# Look up the chosen activations once
act_f1 = activation_f[f1]
act_f2 = activation_f[f2]

# ...and their derivatives (expressed in terms of the activation output)
act_f1_prime = activation_f_prime[f1]
act_f2_prime = activation_f_prime[f2]

# Initialize random weights.
#   V: input -> hidden, W: hidden -> output, R: hidden -> hidden (recurrent)
# The draw order (V, W, R) is part of the seeded RNG stream, so reordering
# these lines would change every result — keep them as-is.
V = np.random.normal(scale=0.1, size=(n_input_layers, n_hidden_layers))
W = np.random.normal(scale=0.1, size=(n_hidden_layers, n_output_layers))
R = np.random.normal(scale=0.1, size=(n_hidden_layers, n_hidden_layers))
91 |
# Train

print("########## TRAIN ##########")

# Training-set
X = features_train
Y = labels_train

# Epoch-training: stochastic gradient descent — weights are adjusted
# after every individual training sample.
for e in range(n_epochs):

    # accumulated per-sample error for this epoch (reported every 10 epochs)
    E = 0

    for i in range(X.shape[0]):

        err = 0

        # gradient accumulators for the three weight matrices
        V_update = np.zeros_like(V)
        W_update = np.zeros_like(W)
        R_update = np.zeros_like(R)

        # hidden state per timestep; index 0 is the initial all-zero state
        h_layers = [np.zeros((1, n_hidden_layers))]

        # output-layer deltas per timestep, consumed by the BPTT pass below
        dels = []

        # Forward Pass: walk the bits from least to most significant
        # (the -j - 1 indexing), so the hidden state can carry.
        for j in range(n_sequence):

            # Forward Prop: current bit of each operand as a (1, 2) input
            x = np.array([X[i][0][-j - 1], X[i][1][-j - 1]]).reshape(1, -1)
            y = np.array(Y[i][-j - 1])

            # recurrent step: new hidden state from input and previous state
            h_inter = np.dot(x, V) + np.dot(h_layers[-1], R)
            h_final = act_f1(h_inter)
            o_inter = np.dot(h_final, W)
            o_final = act_f2(o_inter)

            # Store hidden layer for backpropagation through time
            h_layers.append(h_final)

            # squared-error loss for this output bit
            err += (0.5 * np.square(y - o_final))[0][0]

            # Backward Prop: output-layer delta for this timestep
            del_h_o = -np.multiply(y - o_final, act_f2_prime(o_final))

            # Store delta
            dels.append(del_h_o)

        # NOTE(review): W's gradient is computed from the LAST timestep's
        # h_final/del_h_o only; a full BPTT would accumulate
        # np.dot(h_t.T, del_t) over all timesteps — confirm this is intended.
        change_h_o = np.dot(h_final.T, del_h_o)
        W_update += change_h_o

        next_del = np.zeros(n_hidden_layers)

        # Backward Propagation through time: from the last processed
        # timestep back to the first (dels[-j - 1] / h_layers[-j - 1]
        # index from the end of the stored sequences).
        for j in range(n_sequence):
            x = np.array([X[i][0][j], X[i][1][j]]).reshape(1, -1)

            # hidden delta: error flowing back from the next timestep
            # (via R) plus this timestep's output error (via W)
            del_h = (np.dot(next_del, R.T) + np.dot(dels[-j - 1], W.T)
                ) * act_f1_prime(h_layers[-j - 1])

            change_h_h = np.dot(h_layers[-j - 2].T, del_h)
            change_i_h = np.dot(x.T, del_h)

            R_update += change_h_h
            V_update += change_i_h

            next_del = del_h

        E += err / n_sequence

        # Adjust Weights (per-sample stochastic update)
        V -= V_update * learning_rate
        W -= W_update * learning_rate
        R -= R_update * learning_rate

    if e % 10 == 0:
        print("Epoch: %d Error: %f" % (e, E / X.shape[0]))
169 |
# ### Test
# Run the trained RNN over the held-out pairs and check the decoded sums.

print("########## TEST ##########")

# Test-set
X = features_test
Y = labels_test

success = 0

# Start Test
for i in range(X.shape[0]):

    # Decode the operands and the expected sum from their bit rows.
    a = np.packbits(X[i][0])[0]
    b = np.packbits(X[i][1])[0]

    d = np.packbits(Y[i])[0]

    c = []  # predicted bits, assembled MSB-first for packbits

    h_layer = np.zeros((1, n_hidden_layers))

    for j in range(n_sequence):
        # Consume the sequence LSB-first, exactly as in training.
        # (Removed an unused `y = np.array(Y[i][-j - 1])` that was
        # computed here every iteration but never read.)
        x = np.array([X[i][0][-j - 1], X[i][1][-j - 1]]).reshape(1, -1)

        # Forward prop
        h_inter = np.dot(x, V) + np.dot(h_layer, R)
        h_final = act_f1(h_inter)
        o_inter = np.dot(h_final, W)
        o_final = act_f2(o_inter)

        h_layer = h_final

        # Threshold the sigmoid output into a bit; prepend so the list
        # ends up most-significant bit first.
        c.insert(0, (o_final > 0.5).astype(int)[0][0])

    c = np.packbits(c)[0]

    if c == d:
        success += 1

    print("%d + %d = %d \t --> %5s " % (a, b, c, c == d))

print("\nSuccess: %d/%d, Accuracy = %f" % (success, X.shape[0],
                                           success / X.shape[0] * 100))
215 |
--------------------------------------------------------------------------------
/Recurrent-NeuralNet/code/tf_lstm_climate_timeseries.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 | import matplotlib.pyplot as plt
6 |
# Pre-process data

# seed random number generators for reproducible runs
np.random.seed(1)
tf.set_random_seed(1)

# Parameters
seq_len = 50  # Sequence Length (input window fed to the LSTM)
train_test_ratio = 0.7

time_series = []  # float values in ppm
time_stamps = []  # string corresponding to year-month
19 |
# Read the data from the CSV file: one "YYYY-MM",value row per month.
# The file ends with a blank line and a repeated title row; parsing
# stops at the first row that fails to parse, preserving the original
# try/break behaviour.
with open('datasets/co2-ppm-mauna-loa-19651980.csv') as f:
    next(f)  # skip the header row (replaces the skipped_line flag)
    for line in f:
        try:
            fields = line.strip().split(',')
            time_series.append([float(fields[1])])
            time_stamps.append(fields[0].strip('"'))
        except (ValueError, IndexError):
            # first malformed row marks the end of the data section
            # (narrowed from a bare `except Exception as e` whose `e`
            # was never used)
            break
34 |
# Min-max scale the series into [0, 1]; ts_min/ts_max are kept so the
# predictions can be mapped back to ppm when plotting.
ts_min = np.min(time_series)
ts_max = np.max(time_series)

time_series = (time_series - ts_min) / (ts_max - ts_min)

# Split data into train and test; the test slice starts one sample
# early so the two segments share a boundary point.
train_cut = int(len(time_series) * train_test_ratio)
train_time_series = time_series[:train_cut]
test_time_series = time_series[train_cut - 1:]
44 |
45 |
# Creates sequences from data
def create_dataset(data, len_seq):
    """Slice *data* into overlapping windows and next-step targets.

    Returns (features, labels): features[i] is data[i:i+len_seq] and
    labels[i] is the element immediately following that window.
    """
    n_windows = len(data) - len_seq
    features = [data[k:k + len_seq] for k in range(n_windows)]
    labels = [data[k + len_seq] for k in range(n_windows)]
    return features, labels
54 |
55 |
trainX, trainY = create_dataset(train_time_series, seq_len)
# Prepend the last training window so that the first test target is
# predicted from a full seq_len of history.
testX, testY = create_dataset(
    np.concatenate((trainX[-1], test_time_series)), seq_len)
59 |
# Neural Network

# hyper-parameters
n_rnn_neurons = 100   # LSTM state size
n_input_neurons = 1   # one scalar (scaled CO2 ppm) per time step
n_output_neurons = 1  # one-step-ahead prediction

learn_rate = 0.006

n_epoch = 1000

# input/output placeholders (batch-major: [batch, time, features])
X = tf.placeholder(tf.float32, [None, seq_len, n_input_neurons])
Y = tf.placeholder(tf.float32, [None, n_output_neurons])

# Weights and biases for final fully connected layer
layer_op = {
    'weight':
    tf.Variable(tf.random_normal([n_rnn_neurons, n_output_neurons], stddev=1)),
    'bias':
    tf.Variable(tf.random_normal([n_output_neurons], stddev=1))
}
82 |
# Model

# lstm + dropout layer (25% of outputs dropped during graph execution)
cell = tf.contrib.rnn.BasicLSTMCell(n_rnn_neurons)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.75)
lstm_op, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)

# Just connect last output of hidden layer to fully connected layer
lstm_op = tf.squeeze(lstm_op[:, -1:], axis=1)
final_op = tf.nn.sigmoid(
    tf.matmul(lstm_op, layer_op['weight']) + layer_op['bias'])

# Error and Optimizer

# mean-squared error (targets are scaled to [0, 1], matching the sigmoid)
error = tf.reduce_mean(0.5 * tf.square(final_op - Y))

# adam-optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(error)
102 |
# Start Session: train, evaluate, roll out predictions, then plot.
with tf.Session() as sess:
    tf.global_variables_initializer().run()

    print("*********** Train ***********")

    # Full-batch training: every epoch feeds all training windows at once.
    for epoch in range(n_epoch):
        _, err = sess.run([optimizer, error], feed_dict={X: trainX, Y: trainY})

        if epoch % 100 == 0:
            print("Epoch : %d Error = %f" % (epoch, err))

    print("\n*********** Test ***********")

    # One-step-ahead predictions, each conditioned on the true history.
    err, resultt = sess.run([error, final_op], feed_dict={X: testX, Y: testY})
    print("Testing Error : %f" % err)

    # Predict future values with continuous data: each prediction is fed
    # back as input, so errors can compound over the horizon.
    inp = trainX[-1].flatten().tolist()
    resultp = []

    for i in range(len(test_time_series)):
        op = final_op.eval({X: np.reshape(inp, [1, -1, 1])})
        inp.append(op[0][0])
        resultp.append(op[0][0])
        del inp[0]  # keep the rolling window at seq_len samples

# Plot test and prediction output, rescaled back to ppm.

plt.figure(figsize=(12, 8))

plt.plot(
    train_time_series * (ts_max - ts_min) + ts_min,
    'b',
    label='training data')
plt.plot(
    np.arange(len(train_time_series) - 1, len(time_series)),
    test_time_series * (ts_max - ts_min) + ts_min,
    'c',
    label='expected data')
plt.plot(
    np.arange(len(train_time_series) - 1, len(time_series)),
    resultt * (ts_max - ts_min) + ts_min,
    'm',
    label='test output')
plt.plot(
    np.arange(len(train_time_series) - 1, len(time_series)),
    np.array(resultp) * (ts_max - ts_min) + ts_min,
    'r',
    label='continuous prediction')  # FIX: legend label was misspelled 'continous'

# one tick per year (12 monthly samples)
plt.xticks(
    np.arange(0, len(time_series), 12),
    time_stamps[::12],
    rotation=70,
    fontsize=7)
plt.xlabel('Month')
plt.ylabel('CO2 (ppm)')
plt.legend(loc='upper left')
plt.show()
163 |
--------------------------------------------------------------------------------
/Recurrent-NeuralNet/code/tf_rnn_addition.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
# Pre-process data

# seed random number generators for reproducible runs
np.random.seed(1)
tf.set_random_seed(1)

total_test_cases = 1000  # number of addition examples to generate
train_test_ratio = 0.8   # 80% train / 20% test split
disp_test = 20           # how many test predictions to print

tmp_list = []  # scratch list used while pairing operand bit-rows
features = []
labels = []
19 |
20 |
# function to reverse bits of a number
def reverse_bits(n, n_bits):
    """Return *n* with its lowest *n_bits* bits mirrored.

    Bits above position n_bits - 1 are discarded.
    """
    rev = 0
    for _ in range(n_bits):
        rev = (rev << 1) | (n & 1)
        n >>= 1
    return rev
28 |
29 |
# generate data (addition): operands are drawn from [0, 128) so the
# 8-bit sum cannot overflow.  Bits are reversed so that np.unpackbits
# later yields the sequence least-significant bit first, which is the
# order an adder propagates carries.
for _ in range(total_test_cases):  # loop index was unused
    a = np.random.randint(0, 128)
    b = np.random.randint(0, 128)
    c = a + b

    a = reverse_bits(a, 8)
    b = reverse_bits(b, 8)
    c = reverse_bits(c, 8)

    features.append([a, b])
    labels.append(c)

# Expand every byte into its 8 individual bits.
features = np.array(features, dtype=np.uint8).reshape(-1, 1)
labels = np.array(labels, dtype=np.uint8).reshape(-1, 1)
features = np.unpackbits(features, axis=1)
labels = np.unpackbits(labels, axis=1)

labels = np.expand_dims(labels, 2)  # (N, 8, 1)

# Pair consecutive rows (operand a, operand b) for each example:
# (2N, 8) -> (N, 2, 8).  Equivalent to, and replaces, the original
# Python append loop over tmp_list.
features = features.reshape(-1, 2, 8)
54 |
# split into train-test set and transpose the array
# features: (N, 2, 8) -> (8, N, 2); labels: (N, 8, 1) -> (8, N, 1)
# i.e. time-major layout, as expected by dynamic_rnn(time_major=True)
features_train = np.transpose(features[:int(train_test_ratio * len(features))],
                              [2, 0, 1])
features_test = np.transpose(features[int(train_test_ratio * len(features)):],
                             [2, 0, 1])

labels_train = np.transpose(labels[:int(train_test_ratio * len(labels))],
                            [1, 0, 2])
labels_test = np.transpose(labels[int(train_test_ratio * len(labels)):],
                           [1, 0, 2])
65 |
# Neural Network

# hyper-parameters
n_input_neurons = 2   # one bit from each operand per time step
n_rnn_neurons = 12    # recurrent state size
n_output_neurons = 1  # one sum bit per time step
sequence_len = 8      # 8-bit numbers -> 8 time steps

learning_rate = 0.01

n_epochs = 100

# input/output placeholders (time-major: [time, batch, features])
X = tf.placeholder(tf.float32, [sequence_len, None, n_input_neurons])
Y = tf.placeholder(tf.float32, [sequence_len, None, n_output_neurons])

# weights and biases for the read-out layer applied at every time step
layer_op = {
    'weight':
    tf.Variable(
        tf.random_normal([n_rnn_neurons, n_output_neurons], stddev=0.1)),
    'bias':
    tf.Variable(tf.random_normal([n_output_neurons], stddev=0.1))
}
90 |
# Model

# single-layer vanilla RNN unrolled over the 8 bit positions
rnn_cell = tf.contrib.rnn.BasicRNNCell(n_rnn_neurons)
rnn_ops, rnn_states = tf.nn.dynamic_rnn(
    rnn_cell, X, time_major=True, dtype=tf.float32)

# apply the same fully connected sigmoid read-out to every time step
pred_op = tf.map_fn(
    lambda x:
    tf.nn.sigmoid(tf.matmul(x, layer_op['weight']) + layer_op['bias']),
    rnn_ops)

# Error and Optimizer

# mean-squared error over all time steps and examples
error = tf.reduce_mean(0.5 * tf.square(pred_op - Y))

# adam-optimizer
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(error)
109 |
# Start Session: train the RNN, then decode and score test predictions.
with tf.Session() as sess:
    tf.global_variables_initializer().run()

    print("########## TRAIN ##########")

    # Epoch training (full batch each epoch)
    for epoch in range(n_epochs):
        _, err = sess.run(
            [optimizer, error], feed_dict={X: features_train,
                                           Y: labels_train})

        if epoch % 10 == 0:
            print("Epoch:", epoch, " Error:", err)

    print("\n########## TEST ##########")

    # Threshold the sigmoid outputs into bits, pack them into bytes and
    # undo the bit reversal to recover ordinary integers.
    op = pred_op.eval({X: features_test})
    op = np.transpose((op > 0.5).astype(int), [1, 0, 2])
    op = np.packbits(op)
    op = [reverse_bits(x, 8) for x in op]

    # expected sums
    c = np.transpose(labels_test, [1, 0, 2])
    c = np.packbits(c)
    c = [reverse_bits(x, 8) for x in c]

    # recover the operands for display
    a = features_test[:, :, 0]
    a = np.transpose(a, [1, 0])
    a = np.packbits(a)
    a = [reverse_bits(x, 8) for x in a]

    b = features_test[:, :, 1]
    b = np.transpose(b, [1, 0])
    b = np.packbits(b)
    b = [reverse_bits(x, 8) for x in b]

    success = 0

    for i in range(len(op)):

        # BUG FIX: compare the i-th prediction with the i-th label.
        # The original `if c == op:` compared the whole lists on every
        # pass, so success was always 0 or len(op) regardless of the
        # per-example results.
        if c[i] == op[i]:
            success += 1

        if i < disp_test:
            print("%d + %d = %d \t --> %5s " % (a[i], b[i], c[i],
                                                c[i] == op[i]))

    print("\nSuccess: %d/%d, Accuracy = %f" % (success, len(op),
                                               success / len(op) * 100))
159 |
--------------------------------------------------------------------------------
/Recurrent-NeuralNet/datasets/co2-ppm-mauna-loa-19651980.csv:
--------------------------------------------------------------------------------
1 | "Month","CO2 (ppm) mauna loa, 1965-1980"
2 | "1965-01",319.32
3 | "1965-02",320.36
4 | "1965-03",320.82
5 | "1965-04",322.06
6 | "1965-05",322.17
7 | "1965-06",321.95
8 | "1965-07",321.20
9 | "1965-08",318.81
10 | "1965-09",317.82
11 | "1965-10",317.37
12 | "1965-11",318.93
13 | "1965-12",319.09
14 | "1966-01",319.94
15 | "1966-02",320.98
16 | "1966-03",321.81
17 | "1966-04",323.03
18 | "1966-05",323.36
19 | "1966-06",323.11
20 | "1966-07",321.65
21 | "1966-08",319.64
22 | "1966-09",317.86
23 | "1966-10",317.25
24 | "1966-11",319.06
25 | "1966-12",320.26
26 | "1967-01",321.65
27 | "1967-02",321.81
28 | "1967-03",322.36
29 | "1967-04",323.67
30 | "1967-05",324.17
31 | "1967-06",323.39
32 | "1967-07",321.93
33 | "1967-08",320.29
34 | "1967-09",318.58
35 | "1967-10",318.60
36 | "1967-11",319.98
37 | "1967-12",321.25
38 | "1968-01",321.88
39 | "1968-02",322.47
40 | "1968-03",323.17
41 | "1968-04",324.23
42 | "1968-05",324.88
43 | "1968-06",324.75
44 | "1968-07",323.47
45 | "1968-08",321.34
46 | "1968-09",319.56
47 | "1968-10",319.45
48 | "1968-11",320.45
49 | "1968-12",321.92
50 | "1969-01",323.40
51 | "1969-02",324.21
52 | "1969-03",325.33
53 | "1969-04",326.31
54 | "1969-05",327.01
55 | "1969-06",326.24
56 | "1969-07",325.37
57 | "1969-08",323.12
58 | "1969-09",321.85
59 | "1969-10",321.31
60 | "1969-11",322.31
61 | "1969-12",323.72
62 | "1970-01",324.60
63 | "1970-02",325.57
64 | "1970-03",326.55
65 | "1970-04",327.80
66 | "1970-05",327.80
67 | "1970-06",327.54
68 | "1970-07",326.28
69 | "1970-08",324.63
70 | "1970-09",323.12
71 | "1970-10",323.11
72 | "1970-11",323.99
73 | "1970-12",325.09
74 | "1971-01",326.12
75 | "1971-02",326.61
76 | "1971-03",327.16
77 | "1971-04",327.92
78 | "1971-05",329.14
79 | "1971-06",328.80
80 | "1971-07",327.52
81 | "1971-08",325.62
82 | "1971-09",323.61
83 | "1971-10",323.80
84 | "1971-11",325.10
85 | "1971-12",326.25
86 | "1972-01",326.93
87 | "1972-02",327.83
88 | "1972-03",327.95
89 | "1972-04",329.91
90 | "1972-05",330.22
91 | "1972-06",329.25
92 | "1972-07",328.11
93 | "1972-08",326.39
94 | "1972-09",324.97
95 | "1972-10",325.32
96 | "1972-11",326.54
97 | "1972-12",327.71
98 | "1973-01",328.73
99 | "1973-02",329.69
100 | "1973-03",330.47
101 | "1973-04",331.69
102 | "1973-05",332.65
103 | "1973-06",332.24
104 | "1973-07",331.03
105 | "1973-08",329.36
106 | "1973-09",327.60
107 | "1973-10",327.29
108 | "1973-11",328.28
109 | "1973-12",328.79
110 | "1974-01",329.45
111 | "1974-02",330.89
112 | "1974-03",331.63
113 | "1974-04",332.85
114 | "1974-05",333.28
115 | "1974-06",332.47
116 | "1974-07",331.34
117 | "1974-08",329.53
118 | "1974-09",327.57
119 | "1974-10",327.57
120 | "1974-11",328.53
121 | "1974-12",329.69
122 | "1975-01",330.45
123 | "1975-02",330.97
124 | "1975-03",331.64
125 | "1975-04",332.87
126 | "1975-05",333.61
127 | "1975-06",333.55
128 | "1975-07",331.90
129 | "1975-08",330.05
130 | "1975-09",328.58
131 | "1975-10",328.31
132 | "1975-11",329.41
133 | "1975-12",330.63
134 | "1976-01",331.63
135 | "1976-02",332.46
136 | "1976-03",333.36
137 | "1976-04",334.45
138 | "1976-05",334.82
139 | "1976-06",334.32
140 | "1976-07",333.05
141 | "1976-08",330.87
142 | "1976-09",329.24
143 | "1976-10",328.87
144 | "1976-11",330.18
145 | "1976-12",331.50
146 | "1977-01",332.81
147 | "1977-02",333.23
148 | "1977-03",334.55
149 | "1977-04",335.82
150 | "1977-05",336.44
151 | "1977-06",335.99
152 | "1977-07",334.65
153 | "1977-08",332.41
154 | "1977-09",331.32
155 | "1977-10",330.73
156 | "1977-11",332.05
157 | "1977-12",333.53
158 | "1978-01",334.66
159 | "1978-02",335.07
160 | "1978-03",336.33
161 | "1978-04",337.39
162 | "1978-05",337.65
163 | "1978-06",337.57
164 | "1978-07",336.25
165 | "1978-08",334.39
166 | "1978-09",332.44
167 | "1978-10",332.25
168 | "1978-11",333.59
169 | "1978-12",334.76
170 | "1979-01",335.89
171 | "1979-02",336.44
172 | "1979-03",337.63
173 | "1979-04",338.54
174 | "1979-05",339.06
175 | "1979-06",338.95
176 | "1979-07",337.41
177 | "1979-08",335.71
178 | "1979-09",333.68
179 | "1979-10",333.69
180 | "1979-11",335.05
181 | "1979-12",336.53
182 | "1980-01",337.81
183 | "1980-02",338.16
184 | "1980-03",339.88
185 | "1980-04",340.57
186 | "1980-05",341.19
187 | "1980-06",340.87
188 | "1980-07",339.25
189 | "1980-08",337.19
190 | "1980-09",335.49
191 | "1980-10",336.63
192 | "1980-11",337.74
193 | "1980-12",338.36
194 |
195 | CO2 (ppm) mauna loa, 1965-1980
196 |
197 |
--------------------------------------------------------------------------------
/Recurrent-NeuralNet/np_rnn_addition.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Recurrent Neural Network\n",
8 | "###### Addition using numpy"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "Import dependencies"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 1,
21 | "metadata": {
22 | "collapsed": true
23 | },
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np"
27 | ]
28 | },
29 | {
30 | "cell_type": "markdown",
31 | "metadata": {},
32 | "source": [
33 | "### Pre-process data"
34 | ]
35 | },
36 | {
37 | "cell_type": "code",
38 | "execution_count": 2,
39 | "metadata": {
40 | "collapsed": true
41 | },
42 | "outputs": [],
43 | "source": [
44 | "# seed random number generator\n",
45 | "np.random.seed(1)"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 3,
51 | "metadata": {
52 | "collapsed": true
53 | },
54 | "outputs": [],
55 | "source": [
56 | "total_test_cases = 100\n",
57 | "train_test_ratio = 0.80\n",
58 | "\n",
59 | "tmp_list = []\n",
60 | "features = []\n",
61 | "labels = []"
62 | ]
63 | },
64 | {
65 | "cell_type": "markdown",
66 | "metadata": {},
67 | "source": [
68 | "generate data (addition)"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 4,
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "for _ in range(total_test_cases):\n",
78 | " a = np.random.randint(0, 128)\n",
79 | " b = np.random.randint(0, 128)\n",
80 | " c = a + b\n",
81 | "\n",
82 | " features.append([a, b])\n",
83 | " labels.append(c)\n",
84 | "\n",
85 | "features = np.array(features, dtype=np.uint8).reshape(-1, 1)\n",
86 | "labels = np.array(labels, dtype=np.uint8).reshape(-1, 1)\n",
87 | "features = np.unpackbits(features, axis=1)\n",
88 | "labels = np.unpackbits(labels, axis=1)\n",
89 | "\n",
90 | "for i in range(len(labels)):\n",
91 | " tmp_list.append([features[2 * i], features[2 * i + 1]])\n",
92 | "\n",
93 | "features = np.array(tmp_list)"
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "split into train-test set"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 5,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "features_train = np.array(features[:int(train_test_ratio * len(features))])\n",
110 | "features_test = np.array(features[int(train_test_ratio * len(features)):])\n",
111 | "\n",
112 | "labels_train = labels[:int(train_test_ratio * len(labels))]\n",
113 | "labels_test = labels[int(train_test_ratio * len(labels)):]"
114 | ]
115 | },
116 | {
117 | "cell_type": "markdown",
118 | "metadata": {},
119 | "source": [
120 | "## Neural Network"
121 | ]
122 | },
123 | {
124 | "cell_type": "markdown",
125 | "metadata": {},
126 | "source": [
127 | "hyper-parameters"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": 6,
133 | "metadata": {
134 | "collapsed": true
135 | },
136 | "outputs": [],
137 | "source": [
138 | "n_input_layers = 2\n",
139 | "n_hidden_layers = 16\n",
140 | "n_output_layers = 1\n",
141 | "n_sequence = 8\n",
142 | "\n",
143 | "learning_rate = 1\n",
144 | "\n",
145 | "n_epochs = 100"
146 | ]
147 | },
148 | {
149 | "cell_type": "markdown",
150 | "metadata": {},
151 | "source": [
152 | "Activation functions and their derivative"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": 7,
158 | "metadata": {
159 | "collapsed": true
160 | },
161 | "outputs": [],
162 | "source": [
163 | "activation_f = {\n",
164 | " 'identity': lambda f_x: f_x,\n",
165 | " 'sigmoid': lambda f_x: 1.0 / (1.0 + np.exp(-f_x)),\n",
166 | " 'tanh': lambda f_x: np.tanh(f_x),\n",
167 | " 'arctan': lambda f_x: np.arctan(f_x),\n",
168 | " 'relu': lambda f_x: f_x * (f_x > 0),\n",
169 | " 'softplus': lambda f_x: np.log(1 + np.exp(f_x)),\n",
170 | " 'sinusoid': lambda f_x: np.sin(f_x),\n",
171 | " 'gaussian': lambda f_x: np.exp(-f_x * f_x)\n",
172 | "}\n",
173 | "activation_f_prime = {\n",
174 | " 'identity': lambda f_dx: 1,\n",
175 | " 'sigmoid': lambda f_dx: f_dx * (1.0 - f_dx),\n",
176 | " 'tanh': lambda f_dx: 1.0 - f_dx**2,\n",
177 | " 'arctan': lambda f_dx: 1.0 / (1.0 + np.tan(f_dx)**2),\n",
178 | " 'relu': lambda f_dx: 1.0 * (f_dx > 0),\n",
179 | " 'softplus': lambda f_dx: 1.0 - np.exp(-f_dx),\n",
180 | " 'sinusoid': lambda f_dx: np.cos(np.arcsin(f_dx)),\n",
181 | " 'gaussian': lambda f_dx: -2 * f_dx * np.sqrt(-np.log(f_dx))\n",
182 | "}"
183 | ]
184 | },
185 | {
186 | "cell_type": "markdown",
187 | "metadata": {},
188 | "source": [
189 | "Activation function parameters"
190 | ]
191 | },
192 | {
193 | "cell_type": "code",
194 | "execution_count": 8,
195 | "metadata": {
196 | "collapsed": true
197 | },
198 | "outputs": [],
199 | "source": [
200 | "f1 = 'sigmoid'\n",
201 | "f2 = 'sigmoid'\n",
202 | "\n",
203 | "act_f1 = activation_f[f1]\n",
204 | "act_f2 = activation_f[f2]\n",
205 | "\n",
206 | "act_f1_prime = activation_f_prime[f1]\n",
207 | "act_f2_prime = activation_f_prime[f2]"
208 | ]
209 | },
210 | {
211 | "cell_type": "markdown",
212 | "metadata": {},
213 | "source": [
214 | "Initialize random weights"
215 | ]
216 | },
217 | {
218 | "cell_type": "code",
219 | "execution_count": 9,
220 | "metadata": {
221 | "collapsed": true
222 | },
223 | "outputs": [],
224 | "source": [
225 | "V = np.random.normal(scale=0.1, size=(n_input_layers, n_hidden_layers))\n",
226 | "W = np.random.normal(scale=0.1, size=(n_hidden_layers, n_output_layers))\n",
227 | "R = np.random.normal(scale=0.1, size=(n_hidden_layers, n_hidden_layers))"
228 | ]
229 | },
230 | {
231 | "cell_type": "markdown",
232 | "metadata": {},
233 | "source": [
234 | "### Train"
235 | ]
236 | },
237 | {
238 | "cell_type": "code",
239 | "execution_count": 10,
240 | "metadata": {},
241 | "outputs": [
242 | {
243 | "name": "stdout",
244 | "output_type": "stream",
245 | "text": [
246 | "########## TRAIN ##########\n",
247 | "Epoch: 0 Error: 0.141682\n",
248 | "Epoch: 10 Error: 0.106657\n",
249 | "Epoch: 20 Error: 0.064489\n",
250 | "Epoch: 30 Error: 0.002379\n",
251 | "Epoch: 40 Error: 0.000923\n",
252 | "Epoch: 50 Error: 0.000556\n",
253 | "Epoch: 60 Error: 0.000393\n",
254 | "Epoch: 70 Error: 0.000302\n",
255 | "Epoch: 80 Error: 0.000243\n",
256 | "Epoch: 90 Error: 0.000203\n"
257 | ]
258 | }
259 | ],
260 | "source": [
261 | "print(\"########## TRAIN ##########\")\n",
262 | "\n",
263 | "# Training-set\n",
264 | "X = features_train\n",
265 | "Y = labels_train\n",
266 | "\n",
267 | "# Epoch-training\n",
268 | "for e in range(n_epochs):\n",
269 | "\n",
270 | " E = 0\n",
271 | "\n",
272 | " for i in range(X.shape[0]):\n",
273 | "\n",
274 | " err = 0\n",
275 | "\n",
276 | " V_update = np.zeros_like(V)\n",
277 | " W_update = np.zeros_like(W)\n",
278 | " R_update = np.zeros_like(R)\n",
279 | "\n",
280 | " h_layers = [np.zeros((1, n_hidden_layers))]\n",
281 | "\n",
282 | " dels = []\n",
283 | "\n",
284 | " # Forward Pass\n",
285 | " for j in range(n_sequence):\n",
286 | "\n",
287 | " # Forward Prop\n",
288 | " x = np.array([X[i][0][-j - 1], X[i][1][-j - 1]]).reshape(1, -1)\n",
289 | " y = np.array(Y[i][-j - 1])\n",
290 | "\n",
291 | " h_inter = np.dot(x, V) + np.dot(h_layers[-1], R)\n",
292 | " h_final = act_f1(h_inter)\n",
293 | " o_inter = np.dot(h_final, W)\n",
294 | " o_final = act_f2(o_inter)\n",
295 | "\n",
296 | " # Store hidden layer\n",
297 | " h_layers.append(h_final)\n",
298 | "\n",
299 | " err += (0.5 * np.square(y - o_final))[0][0]\n",
300 | "\n",
301 | " # Backward Prop\n",
302 | " del_h_o = -np.multiply(y - o_final, act_f2_prime(o_final))\n",
303 | "\n",
304 | " # Store delta\n",
305 | " dels.append(del_h_o)\n",
306 | "\n",
307 | " change_h_o = np.dot(h_final.T, del_h_o)\n",
308 | " W_update += change_h_o\n",
309 | "\n",
310 | " next_del = np.zeros(n_hidden_layers)\n",
311 | "\n",
312 | " # Backward Propagation through time\n",
313 | " for j in range(n_sequence):\n",
314 | " x = np.array([X[i][0][j], X[i][1][j]]).reshape(1, -1)\n",
315 | "\n",
316 | " del_h = (np.dot(next_del, R.T) + np.dot(dels[-j - 1], W.T)) * act_f1_prime(h_layers[-j - 1])\n",
317 | "\n",
318 | " change_h_h = np.dot(h_layers[-j - 2].T, del_h)\n",
319 | " change_i_h = np.dot(x.T, del_h)\n",
320 | "\n",
321 | " R_update += change_h_h\n",
322 | " V_update += change_i_h\n",
323 | "\n",
324 | " next_del = del_h\n",
325 | "\n",
326 | " E += err / n_sequence\n",
327 | "\n",
328 | " # Adjust Weights\n",
329 | " V -= V_update * learning_rate\n",
330 | " W -= W_update * learning_rate\n",
331 | " R -= R_update * learning_rate\n",
332 | "\n",
333 | " if e % 10 == 0:\n",
334 | " print(\"Epoch: %d Error: %f\" % (e, E / X.shape[0]))"
335 | ]
336 | },
337 | {
338 | "cell_type": "markdown",
339 | "metadata": {},
340 | "source": [
341 | "### Test"
342 | ]
343 | },
344 | {
345 | "cell_type": "code",
346 | "execution_count": 11,
347 | "metadata": {},
348 | "outputs": [
349 | {
350 | "name": "stdout",
351 | "output_type": "stream",
352 | "text": [
353 | "########## TEST ##########\n",
354 | "23 + 124 = 147 \t --> True \n",
355 | "81 + 7 = 88 \t --> True \n",
356 | "121 + 24 = 145 \t --> True \n",
357 | "74 + 92 = 166 \t --> True \n",
358 | "20 + 32 = 52 \t --> True \n",
359 | "12 + 65 = 77 \t --> True \n",
360 | "94 + 60 = 154 \t --> True \n",
361 | "105 + 24 = 129 \t --> True \n",
362 | "82 + 115 = 197 \t --> True \n",
363 | "97 + 2 = 99 \t --> True \n",
364 | "108 + 92 = 200 \t --> True \n",
365 | "103 + 98 = 201 \t --> True \n",
366 | "10 + 54 = 64 \t --> True \n",
367 | "96 + 105 = 201 \t --> True \n",
368 | "82 + 86 = 168 \t --> True \n",
369 | "70 + 66 = 136 \t --> True \n",
370 | "71 + 103 = 174 \t --> True \n",
371 | "48 + 54 = 102 \t --> True \n",
372 | "15 + 5 = 20 \t --> True \n",
373 | "17 + 42 = 59 \t --> True \n",
374 | "\n",
375 | "Success: 20/20, Accuracy = 100.000000\n"
376 | ]
377 | }
378 | ],
379 | "source": [
380 | "print(\"########## TEST ##########\")\n",
381 | "\n",
382 | "# Test-set\n",
383 | "X = features_test\n",
384 | "Y = labels_test\n",
385 | "\n",
386 | "success = 0\n",
387 | "\n",
388 | "# Start Test\n",
389 | "for i in range(X.shape[0]):\n",
390 | "\n",
391 | " a = np.packbits(X[i][0])[0]\n",
392 | " b = np.packbits(X[i][1])[0]\n",
393 | "\n",
394 | " d = np.packbits(Y[i])[0]\n",
395 | "\n",
396 | " c = []\n",
397 | "\n",
398 | " h_layer = np.zeros((1, n_hidden_layers))\n",
399 | "\n",
400 | " for j in range(n_sequence):\n",
401 | " x = np.array([X[i][0][-j - 1], X[i][1][-j - 1]]).reshape(1, -1)\n",
402 | " y = np.array(Y[i][-j - 1])\n",
403 | "\n",
404 | " # Forward prop\n",
405 | " h_inter = np.dot(x, V) + np.dot(h_layer, R)\n",
406 | " h_final = act_f1(h_inter)\n",
407 | " o_inter = np.dot(h_final, W)\n",
408 | " o_final = act_f2(o_inter)\n",
409 | "\n",
410 | " h_layer = h_final\n",
411 | "\n",
412 | " c.insert(0, (o_final > 0.5).astype(int)[0][0])\n",
413 | "\n",
414 | " c = np.packbits(c)[0]\n",
415 | "\n",
416 | " if c == d:\n",
417 | " success += 1\n",
418 | "\n",
419 | " print(\"%d + %d = %d \\t --> %5s \" % (a, b, c, c == d))\n",
420 | "\n",
421 | "print(\"\\nSuccess: %d/%d, Accuracy = %f\" % (success, X.shape[0], success / X.shape[0] * 100))"
422 | ]
423 | }
424 | ],
425 | "metadata": {
426 | "kernelspec": {
427 | "display_name": "Python 3",
428 | "language": "python",
429 | "name": "python3"
430 | },
431 | "language_info": {
432 | "codemirror_mode": {
433 | "name": "ipython",
434 | "version": 3
435 | },
436 | "file_extension": ".py",
437 | "mimetype": "text/x-python",
438 | "name": "python",
439 | "nbconvert_exporter": "python",
440 | "pygments_lexer": "ipython3",
441 | "version": "3.6.0"
442 | }
443 | },
444 | "nbformat": 4,
445 | "nbformat_minor": 2
446 | }
447 |
--------------------------------------------------------------------------------
/Recurrent-NeuralNet/tf_rnn_addition.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Recurrent Neural Network\n",
8 | "###### Addition using tensorflow"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "Import dependencies"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 1,
21 | "metadata": {
22 | "collapsed": true
23 | },
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import tensorflow as tf"
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "metadata": {},
33 | "source": [
34 | "### Pre-process data"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 2,
40 | "metadata": {
41 | "collapsed": true
42 | },
43 | "outputs": [],
44 | "source": [
45 | "# seed random number generators\n",
46 | "np.random.seed(1)\n",
47 | "tf.set_random_seed(1)"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": 3,
53 | "metadata": {
54 | "collapsed": true
55 | },
56 | "outputs": [],
57 | "source": [
58 | "total_test_cases = 1000\n",
59 | "train_test_ratio = 0.8\n",
60 | "disp_test = 20\n",
61 | "\n",
62 | "tmp_list = []\n",
63 | "features = []\n",
64 | "labels = []"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "function to reverse bits of a number"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 4,
77 | "metadata": {
78 | "collapsed": true
79 | },
80 | "outputs": [],
81 | "source": [
82 | "def reverse_bits(n, n_bits):\n",
83 | " rev = 0\n",
84 | " for i in range(n_bits):\n",
85 | " if n & (1 << i):\n",
86 | " rev |= 1 << ((n_bits - 1) - i)\n",
87 | " return rev"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "metadata": {},
93 | "source": [
94 | "generate data (addition)"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": 5,
100 | "metadata": {
101 | "collapsed": true
102 | },
103 | "outputs": [],
104 | "source": [
105 | "for i in range(total_test_cases):\n",
106 | " a = np.random.randint(0, 128)\n",
107 | " b = np.random.randint(0, 128)\n",
108 | " c = a + b\n",
109 | "\n",
110 | " a = reverse_bits(a, 8)\n",
111 | " b = reverse_bits(b, 8)\n",
112 | " c = reverse_bits(c, 8)\n",
113 | "\n",
114 | " features.append([a, b])\n",
115 | " labels.append(c)\n",
116 | "\n",
117 | "features = np.array(features, dtype=np.uint8).reshape(-1, 1)\n",
118 | "labels = np.array(labels, dtype=np.uint8).reshape(-1, 1)\n",
119 | "features = np.unpackbits(features, axis=1)\n",
120 | "labels = np.unpackbits(labels, axis=1)\n",
121 | "\n",
122 | "labels = np.expand_dims(labels, 2)\n",
123 | "\n",
124 | "for i in range(len(labels)):\n",
125 | " tmp_list.append([features[2 * i], features[2 * i + 1]])\n",
126 | "\n",
127 | "features = np.array(tmp_list)"
128 | ]
129 | },
130 | {
131 | "cell_type": "markdown",
132 | "metadata": {},
133 | "source": [
134 | "split into train-test set and transpose the array"
135 | ]
136 | },
137 | {
138 | "cell_type": "code",
139 | "execution_count": 6,
140 | "metadata": {
141 | "collapsed": true
142 | },
143 | "outputs": [],
144 | "source": [
145 | "features_train = np.transpose(features[:int(train_test_ratio * len(features))], [2, 0, 1])\n",
146 | "features_test = np.transpose(features[int(train_test_ratio * len(features)):], [2, 0, 1])\n",
147 | "\n",
148 | "labels_train = np.transpose(labels[:int(train_test_ratio * len(labels))], [1, 0, 2])\n",
149 | "labels_test = np.transpose(labels[int(train_test_ratio * len(labels)):], [1, 0, 2])"
150 | ]
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "metadata": {},
155 | "source": [
156 | "## Neural Network"
157 | ]
158 | },
159 | {
160 | "cell_type": "markdown",
161 | "metadata": {},
162 | "source": [
163 | "hyper-parameters"
164 | ]
165 | },
166 | {
167 | "cell_type": "code",
168 | "execution_count": 7,
169 | "metadata": {
170 | "collapsed": true
171 | },
172 | "outputs": [],
173 | "source": [
174 | "n_input_neurons = 2\n",
175 | "n_rnn_neurons = 12\n",
176 | "n_output_neurons = 1\n",
177 | "sequence_len = 8\n",
178 | "\n",
179 | "learning_rate = 0.01\n",
180 | "\n",
181 | "n_epochs = 100"
182 | ]
183 | },
184 | {
185 | "cell_type": "markdown",
186 | "metadata": {},
187 | "source": [
188 | "input/output placeholders"
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": 8,
194 | "metadata": {
195 | "collapsed": true
196 | },
197 | "outputs": [],
198 | "source": [
199 | "X = tf.placeholder(tf.float32, [sequence_len, None, n_input_neurons])\n",
200 | "Y = tf.placeholder(tf.float32, [sequence_len, None, n_output_neurons])"
201 | ]
202 | },
203 | {
204 | "cell_type": "markdown",
205 | "metadata": {},
206 | "source": [
207 | "weights and biases"
208 | ]
209 | },
210 | {
211 | "cell_type": "code",
212 | "execution_count": 9,
213 | "metadata": {
214 | "collapsed": true
215 | },
216 | "outputs": [],
217 | "source": [
218 | "layer_op = {\n",
219 | " 'weight': tf.Variable(tf.random_normal([n_rnn_neurons, n_output_neurons], stddev=0.1)),\n",
220 | " 'bias': tf.Variable(tf.random_normal([n_output_neurons], stddev=0.1))\n",
221 | "}"
222 | ]
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "metadata": {},
227 | "source": [
228 | "#### Model"
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "execution_count": 10,
234 | "metadata": {},
235 | "outputs": [],
236 | "source": [
237 | "rnn_cell = tf.contrib.rnn.BasicRNNCell(n_rnn_neurons)\n",
238 | "rnn_ops, rnn_states = tf.nn.dynamic_rnn(rnn_cell, X, time_major=True, dtype=tf.float32)\n",
239 | "\n",
240 | "pred_op = tf.map_fn(lambda x: tf.nn.sigmoid(tf.matmul(x, layer_op['weight']) + layer_op['bias']), rnn_ops)"
241 | ]
242 | },
243 | {
244 | "cell_type": "markdown",
245 | "metadata": {},
246 | "source": [
247 | "#### Error and Optimizer"
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": 11,
253 | "metadata": {
254 | "collapsed": true
255 | },
256 | "outputs": [],
257 | "source": [
258 | "# mean-squared error\n",
259 | "error = tf.reduce_mean(0.5 * tf.square(pred_op - Y))\n",
260 | "\n",
261 | "# adam-optimizer\n",
262 | "optimizer = tf.train.AdamOptimizer(learning_rate).minimize(error)"
263 | ]
264 | },
265 | {
266 | "cell_type": "markdown",
267 | "metadata": {},
268 | "source": [
269 | "#### Start Session"
270 | ]
271 | },
272 | {
273 | "cell_type": "code",
274 | "execution_count": 12,
275 | "metadata": {},
276 | "outputs": [
277 | {
278 | "name": "stdout",
279 | "output_type": "stream",
280 | "text": [
281 | "########## TRAIN ##########\n",
282 | "Epoch: 0 Error: 0.125607\n",
283 | "Epoch: 10 Error: 0.123912\n",
284 | "Epoch: 20 Error: 0.12229\n",
285 | "Epoch: 30 Error: 0.120747\n",
286 | "Epoch: 40 Error: 0.117637\n",
287 | "Epoch: 50 Error: 0.110348\n",
288 | "Epoch: 60 Error: 0.0956172\n",
289 | "Epoch: 70 Error: 0.0716852\n",
290 | "Epoch: 80 Error: 0.0418827\n",
291 | "Epoch: 90 Error: 0.0218792\n",
292 | "\n",
293 | "########## TEST ##########\n",
294 | "13 + 126 = 139 \t --> True \n",
295 | "11 + 42 = 53 \t --> True \n",
296 | "10 + 88 = 98 \t --> True \n",
297 | "24 + 47 = 71 \t --> True \n",
298 | "121 + 68 = 189 \t --> True \n",
299 | "43 + 91 = 134 \t --> True \n",
300 | "121 + 13 = 134 \t --> True \n",
301 | "6 + 2 = 8 \t --> True \n",
302 | "67 + 93 = 160 \t --> True \n",
303 | "106 + 4 = 110 \t --> True \n",
304 | "28 + 35 = 63 \t --> True \n",
305 | "101 + 82 = 183 \t --> True \n",
306 | "102 + 77 = 179 \t --> True \n",
307 | "46 + 78 = 124 \t --> True \n",
308 | "105 + 6 = 111 \t --> True \n",
309 | "91 + 80 = 171 \t --> True \n",
310 | "20 + 23 = 43 \t --> True \n",
311 | "63 + 99 = 162 \t --> True \n",
312 | "71 + 6 = 77 \t --> True \n",
313 | "21 + 64 = 85 \t --> True \n",
314 | "\n",
315 | "Success: 200/200, Accuracy = 100.000000\n"
316 | ]
317 | }
318 | ],
319 | "source": [
320 | "with tf.Session() as sess:\n",
321 | " tf.global_variables_initializer().run()\n",
322 | "\n",
323 | " print(\"########## TRAIN ##########\")\n",
324 | "\n",
325 | " # Epoch training\n",
326 | " for epoch in range(n_epochs):\n",
327 | " _, err = sess.run([optimizer, error], feed_dict={X: features_train, Y: labels_train})\n",
328 | " \n",
329 | " if epoch % 10 == 0:\n",
330 | " print(\"Epoch:\", epoch, \" Error:\", err)\n",
331 | "\n",
332 | " print(\"\\n########## TEST ##########\")\n",
333 | "\n",
334 | " op = pred_op.eval({X: features_test})\n",
335 | " op = np.transpose((op > 0.5).astype(int), [1, 0, 2])\n",
336 | " op = np.packbits(op)\n",
337 | " op = [reverse_bits(x, 8) for x in op]\n",
338 | " \n",
339 | " c = np.transpose(labels_test, [1, 0, 2])\n",
340 | " c = np.packbits(c)\n",
341 | " c = [reverse_bits(x, 8) for x in c]\n",
342 | " \n",
343 | " a = features_test[:, :, 0]\n",
344 | " a = np.transpose(a, [1, 0])\n",
345 | " a = np.packbits(a)\n",
346 | " a = [reverse_bits(x, 8) for x in a]\n",
347 | " \n",
348 | " b = features_test[:, :, 1]\n",
349 | " b = np.transpose(b, [1, 0])\n",
350 | " b = np.packbits(b)\n",
351 | " b = [reverse_bits(x, 8) for x in b]\n",
352 | " \n",
353 | " success = 0\n",
354 | " \n",
355 | " for i in range(len(op)):\n",
356 | " \n",
357 | "        if c[i] == op[i]:\n",
358 | " success += 1\n",
359 | " \n",
360 | " if i < disp_test:\n",
361 | " print(\"%d + %d = %d \\t --> %5s \" % (a[i], b[i], c[i], c[i] == op[i]))\n",
362 | " \n",
363 | " print(\"\\nSuccess: %d/%d, Accuracy = %f\" % (success, len(op), success / len(op) * 100))"
364 | ]
365 | }
366 | ],
367 | "metadata": {
368 | "kernelspec": {
369 | "display_name": "Python 3",
370 | "language": "python",
371 | "name": "python3"
372 | },
373 | "language_info": {
374 | "codemirror_mode": {
375 | "name": "ipython",
376 | "version": 3
377 | },
378 | "file_extension": ".py",
379 | "mimetype": "text/x-python",
380 | "name": "python",
381 | "nbconvert_exporter": "python",
382 | "pygments_lexer": "ipython3",
383 | "version": "3.6.0"
384 | }
385 | },
386 | "nbformat": 4,
387 | "nbformat_minor": 2
388 | }
389 |
--------------------------------------------------------------------------------
/Restricted-Boltzmann-Machine/code/tf_rbm_mnist_reconstruction.py:
--------------------------------------------------------------------------------
1 | # Import dependencies
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 | from tensorflow.examples.tutorials.mnist import input_data
6 | import matplotlib.pyplot as plt
7 |
8 | # Util functions
9 |
10 |
11 | # function to plot the images after during testing phase
def plot_images(images, title, no_i_x, no_i_y=2):
    """Plot real test images next to their reconstructions in a grid.

    Parameters:
        images: sequence of flat 784-pixel arrays; the first `no_i_x`
            entries are the originals, the next `no_i_x` entries their
            reconstructions (see how disp_imgs is filled below).
        title: window title for the figure.
        no_i_x: number of rows in the grid (images per column).
        no_i_y: number of columns (default 2: "Real" vs "Reconstructed").
    """
    fig = plt.figure(figsize=(5, 15))
    # fig.canvas.set_window_title() was deprecated in matplotlib 3.4 and
    # removed in 3.6; going through the canvas manager works on both old
    # and new versions (the old method simply delegated to the manager).
    fig.canvas.manager.set_window_title(title)
    images = np.array(images).reshape(-1, 28, 28)
    for i in range(no_i_x):
        for j in range(no_i_y):
            ax = fig.add_subplot(no_i_x, no_i_y, no_i_y * i + (j + 1))
            # column j draws the j-th group of no_i_x images (row-major grid,
            # so originals fill column 0 and reconstructions column 1)
            ax.matshow(images[no_i_x * j + i], cmap="gray")
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))

            if j == 0 and i == 0:
                ax.set_title("Real")
            elif j == 1 and i == 0:
                ax.set_title("Reconstructed")
27 |
28 |
29 | # load the mnist dataset from tensorflow.examples
30 | mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
31 | features_train, features_test, features_valid = \
32 | mnist.train.images, mnist.test.images, mnist.validation.images  # only the images are used; labels are never read
33 |
34 | # Neural Network Model
35 |
36 | # Hyper-parameters
37 | n_input_layer = features_train.shape[1]  # one visible unit per pixel (flattened 28x28 image, per the reshape in plot_images)
38 | n_hidden_layer = 500  # number of hidden (latent feature) units
39 |
40 | learning_rate = 0.05  # step size for the manual CD-1 updates built below
41 |
42 | n_epoch = 10
43 | batch_size = 100
44 |
45 | test_disp = 10  # no of images in plot
46 |
47 | # Placeholders
48 | X = tf.placeholder(tf.float32, [None, n_input_layer])  # batch of input images
49 | W = tf.placeholder(tf.float32, [n_input_layer, n_hidden_layer])  # weights: fed in as numpy arrays, updated outside the graph
50 | B = tf.placeholder(tf.float32, [n_hidden_layer])  # hidden-unit biases
51 | C = tf.placeholder(tf.float32, [n_input_layer])  # visible-unit biases
53 |
54 | # function to get random sample from input
def get_sample(inp):
    """Draw a binary sample from element-wise Bernoulli probabilities.

    Each entry of `inp` is treated as P(unit = 1); the result holds 1.0
    where a fresh uniform draw fell below that probability, else 0.0.
    """
    noise = tf.random_uniform(shape=tf.shape(inp))
    # sign(p - u) is +1 where u < p and <= 0 otherwise; relu maps that to {1, 0}
    return tf.nn.relu(tf.sign(inp - noise))
59 |
60 |
61 | # Model (Training)
62 | # using contrastive-divergence-k(k = 1)
63 | # x = input
64 | # h(x) = P(h|x) = s_h
65 | # h(x̄) = P(h) = s_h1 (k=1)
66 | # W = W + lr(xᵀh(x) − x̄ᵀh(x̄))
67 | # B = B + lr(h(x) − h(x̄))
68 | # C = C + lr(x − x̄)
69 |
70 | # visible -> hidden: hidden-unit activation probabilities, then a binary sample
71 | p_h = tf.nn.sigmoid(tf.nn.xw_plus_b(X, W, B))
72 | s_h = get_sample(p_h)
73 |
74 | # hidden -> visible (weights are shared: transpose of W on the way down)
75 | p_v = tf.nn.sigmoid(tf.nn.xw_plus_b(s_h, tf.transpose(W), C)) # reconstruction
76 | s_v = get_sample(p_v)
77 |
78 | # visible(1) -> hidden: second up-pass, used for the negative phase of CD-1
79 | p_h1 = tf.nn.sigmoid(tf.nn.xw_plus_b(s_v, W, B))
80 | s_h1 = get_sample(p_h1)
81 |
82 | # error - just for measuring correctness of reconstructed image (never used for updates)
83 | error = tf.losses.mean_squared_error(labels=X, predictions=p_v)
84 |
85 | # positive and negative phase gradients
86 | positive_phase = tf.matmul(tf.transpose(X), s_h)  # xᵀh(x)
87 | negative_phase = tf.matmul(tf.transpose(s_v), s_h1)  # x̄ᵀh(x̄)
88 |
89 | contr_div = (positive_phase - negative_phase) / tf.to_float(tf.shape(X)[0])  # averaged over the batch
90 |
91 | # calculate delta for var
92 | change_w = contr_div
93 | change_b = tf.reduce_mean((s_h - s_h1), axis=0)
94 | change_c = tf.reduce_mean((X - s_v), axis=0)
95 |
96 | # Adjust Weights (gradient ascent step; fetched each batch and fed back in)
97 | new_W = W + learning_rate * change_w
98 | new_B = B + learning_rate * change_b
99 | new_C = C + learning_rate * change_c
100 |
101 | # Train Neural Network
102 |
103 | # Initialize random Weights and biases
104 | w = np.random.uniform(0.1, size=(n_input_layer, n_hidden_layer))  # first positional arg is `low`, so this draws from [0.1, 1.0)
105 | b = np.random.uniform(0.1, size=n_hidden_layer)
106 | c = np.random.uniform(0.1, size=n_input_layer)
# NOTE(review): np.random.uniform(0.1, ...) means low=0.1, high=1.0. If a small
# init in [0, 0.1) was intended it would be np.random.uniform(0, 0.1, ...) — confirm.
107 |
108 | # split into batches
109 | n_batch = features_train.shape[0] // batch_size
110 | batched_data = np.split(features_train, n_batch)  # np.split requires the train-set size to divide evenly into n_batch chunks
111 |
112 | # Start session
113 |
114 | disp_imgs = []  # originals followed by reconstructions, consumed by plot_images
115 |
116 | with tf.Session() as sess:
117 | tf.global_variables_initializer().run()
# NOTE(review): the graph above declares no tf.Variables (parameters live in the
# numpy arrays w/b/c), so this initializer call appears to be a no-op — confirm.
118 |
119 | print("*********** Train ***********")
120 |
121 | # Epoch-training
122 | for epoch in range(n_epoch):
123 | err = []  # per-batch training errors for this epoch
124 |
125 | # Batch training
126 | for b_idx in range(n_batch):
127 | e, w, b, c = sess.run([error, new_W, new_B, new_C], feed_dict={
128 | X: batched_data[b_idx], W: w, B: b, C: c})  # fetch updated params, feed them back next step
129 |
130 | err.append(e)
131 |
132 | val_e = error.eval({X: features_valid, W: w, B: b, C: c})  # reconstruction MSE on the validation split
133 |
134 | print("Epoch: %d, Training-error: %.8f, Validation-error: %.8f" %
135 | (epoch, sum(err) / len(err), val_e))
136 |
137 | print("*********** Test ***********")
138 |
139 | # Test-Reconstruction
140 | test_e, reconstd_image = sess.run([error, p_v], feed_dict={
141 | X: features_test, W: w, B: b, C: c})
142 |
143 | print("Test-error: %.8f" % test_e)
144 |
145 | disp_imgs.extend(features_test[:test_disp])  # first group: real images
146 | disp_imgs.extend(reconstd_image[:test_disp])  # second group: their reconstructions
147 |
148 | # plot output
149 | plot_images(disp_imgs, "Restricted Boltzmann Machine", test_disp)
150 | plt.show()
151 |
--------------------------------------------------------------------------------
/Restricted-Boltzmann-Machine/tf_rbm_mnist_reconstruction.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Restricted Boltzmann Machine\n",
8 | "##### MNIST reconstruction using tensorflow"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "Import dependencies"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": 1,
21 | "metadata": {
22 | "collapsed": true
23 | },
24 | "outputs": [],
25 | "source": [
26 | "import numpy as np\n",
27 | "import tensorflow as tf\n",
28 | "from tensorflow.examples.tutorials.mnist import input_data\n",
29 | "import matplotlib.pyplot as plt\n",
30 | "%matplotlib inline"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {},
36 | "source": [
37 | "### Util functions"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "function to plot the images after during testing phase"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 2,
50 | "metadata": {
51 | "collapsed": true
52 | },
53 | "outputs": [],
54 | "source": [
55 | "def plot_images(images, title, no_i_x, no_i_y=2):\n",
56 | " fig = plt.figure(figsize=(5, 15))\n",
57 | " fig.canvas.set_window_title(title)\n",
58 | " images = np.array(images).reshape(-1, 28, 28)\n",
59 | " for i in range(no_i_x):\n",
60 | " for j in range(no_i_y):\n",
61 | " ax = fig.add_subplot(no_i_x, no_i_y, no_i_y * i + (j + 1))\n",
62 | " ax.matshow(images[no_i_x * j + i], cmap=\"gray\")\n",
63 | " plt.xticks(np.array([]))\n",
64 | " plt.yticks(np.array([]))\n",
65 | "\n",
66 | " if j == 0 and i == 0:\n",
67 | " ax.set_title(\"Real\")\n",
68 | " elif j == 1 and i == 0:\n",
69 | " ax.set_title(\"Reconstructed\")"
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "metadata": {},
75 | "source": [
76 | "load the mnist dataset from tensorflow.examples"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": 3,
82 | "metadata": {},
83 | "outputs": [
84 | {
85 | "name": "stdout",
86 | "output_type": "stream",
87 | "text": [
88 | "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
89 | "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
90 | "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
91 | "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
92 | ]
93 | }
94 | ],
95 | "source": [
96 | "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n",
97 | "features_train, features_test, features_valid = \\\n",
98 | " mnist.train.images, mnist.test.images, mnist.validation.images"
99 | ]
100 | },
101 | {
102 | "cell_type": "markdown",
103 | "metadata": {},
104 | "source": [
105 | "### Neural Network Model"
106 | ]
107 | },
108 | {
109 | "cell_type": "markdown",
110 | "metadata": {},
111 | "source": [
112 | "<br>Hyper-parameters"
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": 4,
118 | "metadata": {
119 | "collapsed": true
120 | },
121 | "outputs": [],
122 | "source": [
123 | "n_input_layer = features_train.shape[1]\n",
124 | "n_hidden_layer = 500\n",
125 | "\n",
126 | "learning_rate = 0.05\n",
127 | "\n",
128 | "n_epoch = 10\n",
129 | "batch_size = 100\n",
130 | "\n",
131 | "test_disp = 10 # no of images in plot"
132 | ]
133 | },
134 | {
135 | "cell_type": "markdown",
136 | "metadata": {},
137 | "source": [
138 | "Placeholders"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": 5,
144 | "metadata": {
145 | "collapsed": true
146 | },
147 | "outputs": [],
148 | "source": [
149 | "X = tf.placeholder(tf.float32, [None, n_input_layer])\n",
150 | "W = tf.placeholder(tf.float32, [n_input_layer, n_hidden_layer])\n",
151 | "B = tf.placeholder(tf.float32, [n_hidden_layer])\n",
152 | "C = tf.placeholder(tf.float32, [n_input_layer])"
153 | ]
154 | },
155 | {
156 | "cell_type": "markdown",
157 | "metadata": {},
158 | "source": [
159 | "function to get random sample from input"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": 6,
165 | "metadata": {
166 | "collapsed": true
167 | },
168 | "outputs": [],
169 | "source": [
170 | "def get_sample(inp):\n",
171 | " distr = tf.random_uniform(shape=tf.shape(inp))\n",
172 | " sample = tf.nn.relu(tf.sign(inp - distr))\n",
173 | " return sample"
174 | ]
175 | },
176 | {
177 | "cell_type": "markdown",
178 | "metadata": {},
179 | "source": [
180 | "#### Model (Training)\n",
181 | "using contrastive-divergence-k(k = 1)\n",
182 | "\n",
183 | "x = input<br>\n",
184 | "h(x) = P(h|x) = s_h<br>\n",
185 | "h(x̄) = P(h) = s_h1 (k=1)<br>\n",
186 | "W = W + lr(xᵀh(x) − x̄ᵀh(x̄))<br>\n",
187 | "B = B + lr(h(x) − h(x̄))<br>\n",
188 | "C = C + lr(x − x̄)<br>"
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": 7,
194 | "metadata": {
195 | "collapsed": true
196 | },
197 | "outputs": [],
198 | "source": [
199 | "# visible-> hidden\n",
200 | "p_h = tf.nn.sigmoid(tf.nn.xw_plus_b(X, W, B))\n",
201 | "s_h = get_sample(p_h)\n",
202 | "\n",
203 | "# hidden -> visible\n",
204 | "p_v = tf.nn.sigmoid(tf.nn.xw_plus_b(s_h, tf.transpose(W), C)) # reconstruction\n",
205 | "s_v = get_sample(p_v)\n",
206 | "\n",
207 | "# visible(1) -> hidden\n",
208 | "p_h1 = tf.nn.sigmoid(tf.nn.xw_plus_b(s_v, W, B))\n",
209 | "s_h1 = get_sample(p_h1)\n",
210 | "\n",
211 | "# error - just for measuring correctness of reconstructed image\n",
212 | "error = tf.losses.mean_squared_error(labels=X, predictions=p_v)\n",
213 | "\n",
214 | "# positive and negative phase gradients\n",
215 | "positive_phase = tf.matmul(tf.transpose(X), s_h)\n",
216 | "negative_phase = tf.matmul(tf.transpose(s_v), s_h1)\n",
217 | "\n",
218 | "contr_div = (positive_phase - negative_phase) / tf.to_float(tf.shape(X)[0])\n",
219 | "\n",
220 | "# calculate delta for var\n",
221 | "change_w = contr_div\n",
222 | "change_b = tf.reduce_mean((s_h - s_h1), axis=0)\n",
223 | "change_c = tf.reduce_mean((X - s_v), axis=0)\n",
224 | "\n",
225 | "# Adjust Weights\n",
226 | "new_W = W + learning_rate * change_w\n",
227 | "new_B = B + learning_rate * change_b\n",
228 | "new_C = C + learning_rate * change_c"
229 | ]
230 | },
231 | {
232 | "cell_type": "markdown",
233 | "metadata": {},
234 | "source": [
235 | "### Train Neural Network"
236 | ]
237 | },
238 | {
239 | "cell_type": "markdown",
240 | "metadata": {},
241 | "source": [
242 | "Initialize random Weights and biases"
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "execution_count": 8,
248 | "metadata": {
249 | "collapsed": true
250 | },
251 | "outputs": [],
252 | "source": [
253 | "w = np.random.uniform(0.1, size=(n_input_layer, n_hidden_layer))\n",
254 | "b = np.random.uniform(0.1, size=n_hidden_layer)\n",
255 | "c = np.random.uniform(0.1, size=n_input_layer)"
256 | ]
257 | },
258 | {
259 | "cell_type": "markdown",
260 | "metadata": {},
261 | "source": [
262 | "split into batches"
263 | ]
264 | },
265 | {
266 | "cell_type": "code",
267 | "execution_count": 9,
268 | "metadata": {
269 | "collapsed": true
270 | },
271 | "outputs": [],
272 | "source": [
273 | "n_batch = features_train.shape[0] // batch_size\n",
274 | "batched_data = np.split(features_train, n_batch)"
275 | ]
276 | },
277 | {
278 | "cell_type": "markdown",
279 | "metadata": {},
280 | "source": [
281 | "#### Start session"
282 | ]
283 | },
284 | {
285 | "cell_type": "code",
286 | "execution_count": 10,
287 | "metadata": {},
288 | "outputs": [
289 | {
290 | "name": "stdout",
291 | "output_type": "stream",
292 | "text": [
293 | "*********** Train ***********\n",
294 | "Epoch: 0, Training-error: 0.04644489, Validation-error: 0.02066885\n",
295 | "Epoch: 1, Training-error: 0.01913762, Validation-error: 0.01762982\n",
296 | "Epoch: 2, Training-error: 0.01702874, Validation-error: 0.01610900\n",
297 | "Epoch: 3, Training-error: 0.01586430, Validation-error: 0.01527168\n",
298 | "Epoch: 4, Training-error: 0.01507473, Validation-error: 0.01459762\n",
299 | "Epoch: 5, Training-error: 0.01450215, Validation-error: 0.01412933\n",
300 | "Epoch: 6, Training-error: 0.01405964, Validation-error: 0.01375704\n",
301 | "Epoch: 7, Training-error: 0.01367655, Validation-error: 0.01342796\n",
302 | "Epoch: 8, Training-error: 0.01338743, Validation-error: 0.01311545\n",
303 | "Epoch: 9, Training-error: 0.01310958, Validation-error: 0.01285918\n",
304 | "*********** Test ***********\n",
305 | "Test-error: 0.01286180\n"
306 | ]
307 | }
308 | ],
309 | "source": [
310 | "disp_imgs = []\n",
311 | "\n",
312 | "with tf.Session() as sess:\n",
313 | " tf.global_variables_initializer().run()\n",
314 | " \n",
315 | " print(\"*********** Train ***********\")\n",
316 | "\n",
317 | " # Epoch-training\n",
318 | " for epoch in range(n_epoch):\n",
319 | " err = []\n",
320 | "\n",
321 | " # Batch training\n",
322 | " for b_idx in range(n_batch):\n",
323 | " e, w, b, c = sess.run([error, new_W, new_B, new_C], feed_dict={\n",
324 | " X: batched_data[b_idx], W: w, B: b, C: c})\n",
325 | "\n",
326 | " err.append(e)\n",
327 | " \n",
328 | " val_e = error.eval({X: features_valid, W: w, B: b, C: c})\n",
329 | "\n",
330 | " print(\"Epoch: %d, Training-error: %.8f, Validation-error: %.8f\" %\n",
331 | " (epoch, sum(err) / len(err), val_e))\n",
332 | " \n",
333 | " print(\"*********** Test ***********\")\n",
334 | "\n",
335 | " # Test-Reconstruction\n",
336 | " test_e, reconstd_image = sess.run([error, p_v], feed_dict={\n",
337 | " X: features_test, W: w, B: b, C: c})\n",
338 | " \n",
339 | " print(\"Test-error: %.8f\" % test_e)\n",
340 | " \n",
341 | " disp_imgs.extend(features_test[:test_disp])\n",
342 | " disp_imgs.extend(reconstd_image[:test_disp])"
343 | ]
344 | },
345 | {
346 | "cell_type": "markdown",
347 | "metadata": {},
348 | "source": [
349 | "#### plot output"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "execution_count": 11,
355 | "metadata": {},
356 | "outputs": [
357 | {
358 | "data": {
359 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPgAAANUCAYAAACe5+ADAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XecVdW5//HPlt6bgBRBARFpooCCoqKiYq9EY/nZSywx\nGk1ssWI0mmJujKhRo8ESu6IgGgsq2IKIvYBSDb335vz+mHnWWcezmTnD7Dll+X2/Xvd1933mzDnb\ny17zPGfVqKSkBBEJ01b5vgERqT5q4CIBUwMXCZgauEjA1MBFAqYGLhKwoBt4FEXjoig6K9/3IZKk\nKIoGR1E0O5vXFkQDj6JoehRFa6IoWhlF0dwoih6Moqhhvu9LklUs/85RFF0fRdHD1fj+D0ZRNLy6\n3t9XEA28zOElJSUNgT7ALsCVeb4fqR5F/+8clSqktrNZBXeTJSUlc4GXKX0AiKKoThRFf4yiaGYU\nRfOiKLo7iqJ6ZT9rFkXRi1EULYiiaEnZdft83r9kpzL/zmU/PzKKoslRFC2PoujbKIqGlsXbRlE0\nKoqixVEUTY2i6Gzvd66PouiJKIr+FUXRiiiKPo+iqJ/3899GUfR92c++jqJo/7L3vQo4vqzS+Ljs\nteOiKLo5iqIJwGqgU1lFMuRHn/ew938PiqLonSiKlkZRNCuKotOiKDoHOAn4Tdn7v+D9dzxd9ixP\ni6Lol9771CvL+kuiKPoC6J/t/58LroGXNdCDgalloT8AXSl9ELoA7YBry362FfBPoCPQAVgD3JnL\n+5UtU5l/5yiKdgP+BVwONAX2BqaX/d5jwGygLXAc8Psoivb3PuoI4N9lvzeKsucjiqIdgQuB/iUl\nJY2Ag4DpJSUlY4HfA4+XlJQ0LCkp2dl7r1OAc4BGwIwK/vs6AC8BfwNalv13TS4pKbkXeAS4rez9\nDy+rBl4APi77794f+FUURQeVvd11QOey/zkIOLW8z05TUlKS9/+h9B9rJbACKAFeK/sHiYBVQGfv\ntQOBaZt5nz7AEu//Hgecle//Pv1P1f6dgXuAv8S837bAJqCRF7sFeLDs+nrgVe9n3YE1ZdddgPnA\nEKDWj973euDhH8XGATfG/PcMifs9Sr96PLuZ/z88CAz3/u/dgZk/es2VwD/Lrr8Dhno/OweYnc3/\nz2tm92cgJ44qKSl5NYqifYBHga2B2kB94MMoiux1EVADIIqi+sBfgKFAs7KfN4qiqEZJScmmXN68\nZK3S/86UNuQxMe/VFlhcUlKywovNAPp5//dc73o1UDeKopolJSVToyj6FaWNskcURS8Dl5aUlPyv\nnHuflc1/oHfP32b52o5A2yiKlnqxGsDbZddtf/TZ5VYPvoIr0UtKSt6k9C/cH4GFlJbdPUpKSpqW\n/U+TktJOGoBfAzsCu5eUlDSmtHSD0odDClgl/51nUVqe/tj/gOZRFDXyYh2A77O8h0dLSkoGUdrA\nSij9mkDZdeyv/Oj/XkXpHyazjXe9uXuOe59ZlFYrTb3/aVRSUnJI2c/nUPoHw3TYzPtmKLgGXuYO\n4ACgN/AP4C9RFLUCiKKonffdpBGlD8bSKIqaU/pdRYpHtv/O9wOnl3WCbVX2s24lJSWzgHeAW6Io\nqhtFUW/gTEq/45YriqIdoyjaL4qiOsBaSp8jq/rmAdtl0VM+GTghiqJaZZ13x3k/ewQYEkXRz6Io\nqhlFUYsoivp479/Je+0HwPKyTr96URTViKKoZxRF1pn2BHBlWadye+Ciiv77TEE28JKSkgWUdqr8\nDvgtpR0x70VRtBx4ldKsDaUPSD1KM8B7wNjc361sqWz/nUtKSj4ATqf069gy4E1Ksy7Az4HtKM3m\nzwLXlZSU/CeLj68D3ErpszMXaEVp7znAk2X/e1EURZPKeY/fUZqllwA3UPqVw/7bZgKHUFplLqb0\nj4F12N0PdC/rXX+u7Ovk4ZT2IU0ru6
f7gCZlr7+B0rJ8GvAKMDKL/z4AorIv7SISoILM4CKSDDVw\nkYCpgYsETA1cJGBq4CIBUwMXCZgauEjA1MBFAlapxSZRFGlWTIySkhLNfU+Anq94VXm+lMFFAqYG\nLhIwNXCRgKmBiwRMDVwkYGrgIgFTAxcJmBq4SMDUwEUCpgYuErBC2hddJGe8/dedEPcnVAYXCZga\nuEjAVKJL0Yors/3YDz/8AEC9eu6QUlq3bg3A0qWpU4JWrFiR9noIp1xXBhcJWM4y+HHHlZ7qcvbZ\n7vhm/ve/0nPe1q5d62KPPFJ66szcuakz46ZOnYqIqVGj9ExCP1vvscceAAwZ4o7rZp999gGgU6fU\nKUEbNmwAYNy4cS72/felR5k9/LA72pvly5cDsNVWqRxo16tXr854v1q1arlYs2al52DOmTPHxdas\nWQOkVwm5oAwuEjA1cJGAVepssqpsqfPdd98BsN1222X1euv4APj888+39GNjzZ49G4DbbrvNxSZO\nnLjF76ctm5IR93xZGe4/pxarWTP1DfPkk08G4JZbbnGxRo1KTxX2y2wrvevXT536az/fuHFjRmzd\nunUuZj9///33XaxLly5pnwUwf/58AM455xwX+/rrrwFYtWrVj/8TK6Qtm0QkVs462axzrXfv3i72\n5ZdfArDTTju52K677grA4MGDXWzAgAEAzJo1y8W23dY/Dz2d/5d4wYIFALRp0ybjdTNnznTXVcng\nUn3iKkyLbdq0ycVefvllID2rX3755QAsW7bMxbbffnsgvVPM3q9u3bouZlWCdY5BqkPNOuUAunXr\nBsDWW2/tYk2bNgWgf//+LjZ58uTN/jdWJ2VwkYCpgYsELGedbJVlY4kAffr0AeDDDz90Mb/8+TF/\nXP2bb74BUl8HAJo3bw7ABRdc4GIjRozY4ntVJ1sysn2+bBzcH1O28rpjx44uZmVzy5YtM17385//\n3MXeffddIFVuQ6o0t9If4M033wRg/fr1LrbzzjsDMGbMGBezrwl9+/Z1Metk25IZcupkE5FYBZvB\nk3LssccC8MQTT7jYZ599BsC+++7rYosXL97iz1AGT0Zlny/L5JDK5n7MMqnf8RbHsrU/nGbtwu+w\n9e7TXVun8Pjx413MhsJ69erlYjarzf/dbNueMriIxFIDFwlYkMtFW7Vq5a7vuusuIL38uvHGG4Gq\nleWSf3ELN/yx8bjy2sSVytkuF/W/Bvz9738H0r8GzJgxA4B58+Zl/G6ul6Eqg4sELMgM7g9/2RDJ\nkiVLXMyGLKS4VSUbljdDbnOsCrRlqJDqZPOz+r333gvkfmloHGVwkYCpgYsELKhx8D333BOA119/\n3cVsUYG/eOWtt95K9HM1Dp6MXD5ffqer9/nu2spr/3W2JPTjjz92sQ4dOgCpRU0Abdu2Bcrv5KsM\njYOLSKygOtkOOeQQIH0p4GuvvQak5huLQPwGEv4Qm/E7yoYNGwZAu3btXMx+x5Y0Q3KZOwnK4CIB\nUwMXCVjRl+j+pvZDhw4F0pfzXXfddUBqNw756fE7z+L2eCtvvNof3z7xxBMz3m/SpEkATJs2LZmb\nTZgyuEjAij6D275bALvssgsAY8eOdbF33nkn5/ck4ejcubO7to60hQsXupjNaivUo46UwUUCpgYu\nErCiLdEPPfRQAH73u9+5mG1qb8tBRWDLFpZYOf7iiy9m/M4dd9zhYv62yuWJOwk1F2W9MrhIwIpq\nLnqLFi3c9QcffACkNrIH+Pe//w2khjNyRXPRk5GL5ysukxrbbRfgq6++AqBBgwYuZrv6HnjggS5W\nXgb357FXZemo5qKLSCw1cJGAFUUnm80m8se3rTT/9ttvXczvcBOJE/eVtHbt2gBcfPHFLmbl+tKl\nS11s+PDhQPqJo3Hsa4B2dBGRalUUGdxmE/lHwZhLL73UXfvZXKQ8fmdb165dAbjwwgtdzI6/sl15\nAc
aNGwdUnJkLaVabMrhIwNTARQJWsCW6f0rkK6+8kvFzW2TizzQSyVb79u3d9SOPPAJA48aNXczO\nErvzzjtdzF+GXCyUwUUCVrAZ/JxzznHXtnOlz85qLqQODSkscZs72LN03333uVjv3r0zfnf69OkA\nzJ8/38WK8VlTBhcJmBq4SMAKrkQfNGgQABdddFGe70QKnb9fWtyWx1ai20w1gP322w+AgQMHZrze\nL8HPPPPMjFgxUgYXCVjBZfC99toLgIYNG2b8zJ+ptnLlypzdkxSmuKztswzu76hrZ3bXr18/4/X+\n66ZMmZLELeadMrhIwNTARQJWcCV6HDvNcf/993exxYsX5+t2pID5i0ishPd3Vjn++OMBWLRokYut\nWrUKgJEjR7qYdeBVdM5YzZo1s3pdviiDiwSsqPZkK1Taky0ZudyTzc/0Nozmt4WKNnXIJe3JJiKx\n1MBFAlbZTraFwIzquJEi1rHil0iWqv35sjLcL8dt95YCVaXnq1LfwUWkuKhEFwmYGrhIwNTARQKm\nBi4SMDVwkYCpgYsETA1cJGBq4CIBUwMXCZgauEjA1MBFAqYGLhKwSq0m04YP8bThQzL0fMXThg8i\nEksNXCRgauAiAVMDFwmYGrhIwAru4IPLLrsMgHr16rmYHdB+3HHHZbx+xIgR7vrdd98F0jewl5+W\nWrVquesffvgBSD/44OabbwagRYsWLmYHanz55ZcuZueUnXbaaS5mByT425zZwQerV6+u9L3a1s3V\nuW2aMrhIwNTARQJWECebPP744+46rgzPlh0vPGTIEBebOXPmlt9YljTRJRmVfb780ttOJ/HPCLvo\noosAOOGEE1ysbdu2AGyzzTYuZmW2z9rFihUrXOy8884DYPTo0S62fPlyu/eM302KJrqISKy8drJZ\n5q4oa3/11VcAvPzyyy7WqVMnAA4//HAX69y5MwAnnXSSi91yyy3J3KwUHOtEA2jQoAGQ3sn2s5/9\nDIBdd93VxSzrWycapLKvXxHYe9etW9fFbr31ViC9Q+3NN98EYNmyZVX5T6k2yuAiAVMDFwlYzkv0\nfv36ueujjz464+eff/45AEcccYSLLVy4EICVK1e6mHWqvPfeey628847A+ljnBIGvxPLSulNmza5\nmJXIPXv2dLEJEyYAqa9zABMnTgRSX/sA5s6dm/a+AH369AFSzxTAokWLAOjbt6+LvfXWW0DFHWu5\nGPOOowwuErCcZ/A2bdq4a/urZlkb4KCDDgJgzpw55b7Pr3/9awC6d++e8TN/GEPC4Gc+y9z+bMdG\njRoB6cNfHTp0AOD+++93sYcffhiAefPmudj69esBqFOnjov16NEDgL333tvFunXrBsBzzz3nYjZM\nVpn7zyVlcJGAqYGLBCznJfoLL7zgrrt06QKkzxZavHhxVu9js5P8cU/5abHSGlLj2u+//76L2VfA\nL774wsVmzZoFxC8YqVGjhovZbEjr4IVUZ9z8+fNdzB+L//HnxlEnm4gkJq8z2WbMmFGp119++eXu\numvXrhk/t7/e/l9xCVfcMJk/1GXLh/0KMS6DWueaPyvSri3jAzz66KNAepUZ934W8ysC/17LY/cf\nVxlsCWVwkYCpgYsErOB2dIlz2GGHAXDjjTe6mM1k8zs8rrzySmDLdteQ4mZlsV8KL126dLOv98vn\nAw44AIA77rjDxerXrw+kLzZp1aoVkL5QJU7cTLtsJVWau3tJ9N1EpKAURQa3+euWtX3+ZhG2dE9+\nerIdfrIhrMaNG7uY7bvWrFkzF7ONI/wMbvwNIvwNJkzSHWVVoQwuEjA1cJGAFWyJ7k/oP/DAAzN+\n/q9//QuAa665Jmf3JMXPvub5Y94DBw4E0ktq2yL566+/djGbX+GX5YVUjsdRBhcJWMFlcFtOusce\ne7iYzTTy5wUPHz4cSN8EQiSOPze8efPmAJx44okuZusZ/BlqViE+
9thjLjZlyhQgfbacP9xm1q1b\nl8RtJ0IZXCRgauAiASu4Ev3pp58G4vdVs904IHXIgUhF/PkTAwYMAFKz0iC1GOW1115zsT/84Q9A\n/KxIv+S3Et0fh7dY3Bh5rimDiwSsII4u8ndQfeKJJ4D0jRzGjRsHwJFHHulihdS5pqOLkpH082XP\n0MUXX+xitq6hYcOGGa8/5phj3LUtE/U71Oz9/JlstumE344sw/sbUlSFji4SkVhq4CIBy2snm3Wk\nXXXVVS4Wt8fa5MmTgcIqy6Xw2YEHF1xwgYvZnIolS5a42Nlnnw3A7NmzXSzuvLK40ttK82w71Krz\nFNI4yuAiActrBrfDC/r375/xM38u+nXXXZeze5Li1rp1a3f97LPPAumHIdiBB7ZfG6SOM6poh1Tb\nwMHfyCHbOehxO63acNqWbAyRLWVwkYCpgYsELK/j4GvXrgXiO9bat2/vris6pyzfNA6ejKo8XzY2\nve+++7rYX//6VwBatmzpYr/5zW+A1HwLSC0N/dG9AOmdbNZW4sa8/RLcnme/9LavCXZeGsD06dOB\n9Oc7ruTXOLiIxCq4uejGlvVBxbtYGtv83n+9/TVt0qRJxuubNm3qri+99NLNvq//l/i3v/0toJ1b\nC40NU02aNMnFrPPs4IMPdrHevXsD8MYbb2T8boMGDVzM9mfzh2Yt0/vPQ7t27QDYdtttXcyeq169\nernY6aefDqRn64suughI3xnY6OADEamQGrhIwAq2RP/kk08q/TtPPvkkkF4G2bjo8ccfn8h92QmT\nN998cyLvJ1surgPML6mtE8tmr0GqVLatkiF+fNt+1z8N1/7t7VRcSO0X2LFjRxezLZn9RSlWcr/+\n+usu9otf/AKACy+80MWy/TqaLWVwkYDlNYOPGTMGSF8GWhXDhg3L6nXWqRLXkTFq1Ch3bZ00vrff\nfnsL706SFjev28/qH330EZA+x3yHHXYA0vdSs/fxh7+sk6179+4uZkch+RWBdaj52TqO7efmv58t\nT63OHVmVwUUCpgYuErCC2NHFZhdB/Kw206NHD3ddXqfZAw884K6ts8Rn+7599dVXlbnNzdJMtmTE\nPV9xBwvExaxE9ktle7YbNWrkYueffz4A5513novZ7i5+B5d1uNncCv/zvvnmGxdr27YtANtvv72L\nWTnuv87GvH323hW1Qc1kE5FYBZHBi50yeDKyfb5sl9S4Pc/8TrZsO68s6/snifbs2RNI3933gw8+\nAGD58uUuZpnef53t0hrXCbglMyCVwUUklhq4SMBUoidAJXoy9HzFU4kuIrHUwEUCpgYuEjA1cJGA\nqYGLBEwNXCRgauAiAVMDFwmYGrhIwCq7o8tCYEZ13EgR61jxSyRLer4yVen5qtRUVREpLirRRQKm\nBi4SMDVwkYCpgYsETA1cJGBq4CIBUwMXCZgauEjA1MBFAqYGLhIwNXCRgKmBiwSsUqvJtG91PO2L\nngw9X/G0L7qIxFIDFwmYGrhIwNTARQKmBi4SsMruySYShChKdUyHvG2ZMrhIwNTARQIWVInetWtX\nAL766isXu/jiiwH429/+lpd7ksLkl+VbbZWZ52rVqgXA+vXrXax+/foAnHjiiS52//33A/DDDz9U\ny31WlTK4SMCCyuC77LILkP7XdPbs2fm6HSlA1rnmZ3C79mPr1q0DoGHDhi529tlnA/DII49k/G6h\nUgYXCZgauEjAgirR+/TpA8CqVatc7Nlnn83X7UgBiiupyyuzO3Xq5K6vu+46ABYvXuxijz32GJDe\nGVdIlMFFAlapwwcLcb1uz5493fW7774LwMiRI13s/PPPr/Z70HrwZFTX87Uls9bsdxYsWOBizZo1\nA2DChAkutu+++wKwadOmKt/n5mg9uIjEUgMXCVjRd7J169bNXTdo0ACAxx9/PF+3IwVoS8aqmzdv\nDkCNGjVczOZXTJw4MSNWqJTB
RQJW9J1sH3zwgbtu2bIlkN7x5g+ZVRd1siWjkJ6v1q1bA/Dee++5\n2Nq1a4HUcCykZrxVJ3WyiUgsNXCRgBVtJ9t2220HQL9+/Vzsm2++AXJTlkvY6tWrB0DTpk1d7MUX\nXwRgw4YNebmnLaEMLhKwos3g++yzT0bMn3UkUhWDBw/OiFllWOhLRH3K4CIBUwMXCVjRlui9evXK\niN122215uBMJhb832+mnnw6k7+jy5JNPAirRRaRAFNVMtgEDBrjr0aNHAzB9+nQX23PPPYHUjKNc\n0Uy2ZOT7+dptt93ctS09XrlypYt17twZgIULF+b0vjSTTURiqYGLBKyoOtmGDBnirm0539ixY10s\n16W5hKFOnToAPP300y5mHW7+YpKlS5fm9sYSoAwuErCiyuA777yzu7bOwaeeeipftyOBqFmztBn4\nw2S2kcNDDz2UESsmyuAiAVMDFwlYUYyDb7PNNgBMnjzZxZYsWQLATjvtlI9bSqNx8GTk6/lq0aIF\nAFOnTnUxK9vtZ5C/ww00Di4isYqik+20004DoFWrVi720ksv5eluJDR9+/YF0uedT5kyBSiuzR3i\nKIOLBEwNXCRgRVGid+zYMSNmnWwiVTV06FAANm7c6GK33nprvm4nUcrgIgErigx+2GGHZcReeOGF\nPNyJhMI/kuikk04CYMaMGS723XffAbnZ3GFLTj/NljK4SMDUwEUCVrAl+qBBg9y1zWQTSYr/tc+W\nHtevX9/F/BNEq1t1fg1QBhcJWMFm8KOPPtpdW4fIRx995GJvvfVWzu9Jip89Sw8//LCL2bzzOXPm\nuFixz2AzyuAiAVMDFwlYwZXo1tFxyCGHZPzM371l06ZNObsnCUetWrWA9N1Z7Fl65JFHMmLFThlc\nJGAFt+GD/YV98803XWz+/PkAnHjiiS62evXq6r6VrGnDh2Tk4vmyTrb+/fu7mO3G+/HHH7tYIR1P\npA0fRCSWGrhIwAquRC9GKtGToecrnkp0EYmlBi4SMDVwkYCpgYsETA1cJGBq4CIBUwMXCZgauEjA\n1MBFAlbZ5aILgRkVvuqnJfNUBtlSer4yVen5qtRUVREpLirRRQKmBi4SMDVwkYCpgYsETA1cJGBq\n4CIBUwMXCZgauEjA1MBFAqYGLhIwNXCRgKmBiwSsUqvJtG91PO2Lngw9X/Gq8nwV3OmiIoUkilJt\nq7yVl1ttlSqG/ZNL800lukjA1MBFAqYSXaQcFW2IYiV8IZXlPmVwkYApg0vw/I6yuFjdunUB2HXX\nXV1sypQpAGzYsMHFNm3aBMDGjRtdbP369RmvKyTK4CIBUwMXCVhRlOgNGjQA4Pbbb3exc889F4AP\nP/zQxYYNGwbAjBnaeVfiS3Mbr65du7aL9enTB4Bf/epXLtarVy8g/fl65JFHAJg0aZKLzZ07d7Of\nWwg7FiuDiwSsKDJ4mzZtADj77LNdzIYl+vbt62KHHXYYAH//+99zeHdSqOIyqV372b1jx45p/xtg\n7dq1APTo0cPFjjjiCADefffdcj83LnNb5ZDr4TRlcJGAqYGLBKxgS/SWLVu664ceeiiPdyLFKq4c\ntlJ59913d7HzzjsPgLZt27qYlfBLlixxsXXr1gHQuHFjF1u6dClQcYda3L3kojNOGVwkYAWXwX/5\ny18CcNRRR7nYbrvtltXv7r333kD60r2PP/4YgLfeeiupW5Qi4z8Pu+yyCwAPPvigi7Vr1w5IZWiA\n+fPnA6mZagDt27cHoF+/fi62YMECAFavXu1i5WXkbJefJkUZXCRgauAiAavU+eC52FLHJvRnO15Y\n0U4aNqvt+OOPdzF/dlIStGVTMpJ+vqwctllpkOqw7d27d8brV6xY4a6nTZsGpEp1gBo1agDpZfvI\nkSMBeO2111xs2bJlQHrJXxVVeb6UwUUCVhAZfMyYMe764IMPBrLP4IsWLXLXK1euBNJnJMWxv8
RJ\nUQZPRhLPl9+JZcNZ//73v13swAMPBNIrP8vI9vwATJ8+HUgNgwE0bdoUSM/0xma+Afzzn/8E4I03\n3nAxqwS2pJNNGVxEYqmBiwQsr+Pg++yzDwA77riji1lpXlGJfvfddwPwyiuvuJh1buy3334udvXV\nV2f87i9+8QsARowYsSW3LQWsVq1a7vqyyy4DYNCgQRmv83dgsa9548aNczEr2+fMmeNiNWuWNhd/\nAcpOO+0EQKNGjVysc+fOAAwZMsTFtNhERBKX80627bbbzl3bsrutt97axeL+0tlQ19NPP+1iN9xw\nA5A+g8j4nWz2Gf7cdusQufbaa13szjvvBLZsby11siUjieerZ8+e7to6uWzDEEh1mo0fP97FHnjg\nAQC+/PJLF7NnxH8e7DkdOnSoi5155pkAdOnSxcUs+z/11FMudumllwLpHXnqZBORKlEDFwlYzjvZ\nrKMC0kvzH3vzzTfd9QknnADAwoULs/oMf0+2W265BYA///nPLla/fn0AbrvtNhcbNWoUAN9++21W\nnyGFxeY2+B229nXPH8u+//77gfRdf2xJqL8dctzOL1ZeP/rooy5m73355Ze7mH1F9MfGGzZsmPYe\nuaIMLhKwglsuOnHiRADOOOMMF8s2c8exzHzSSSe5WP/+/bf4/aSw+R1gloX9TRuso9afAelnbhO3\nI6utk1i8eLGLPfvsswAcfvjhLtatWzcgtUcgwD333JPxudYZV51LSJXBRQKmBi4SsLyW6P6Ef+Pv\nlZUEK3/8z4r73Ouvvx6AU045JdHPl9ywTiz/IAIbw7ZdV/xrK7chvkMtrkS31/lltL2PzaL0f77N\nNttk3F9FXwdUootI1nKewW0HS8jNvFzr/LC9uPzP9T/fMrgUJ8t8tr8apDqx/vvf/7qYdZBV9OzZ\nz/1qz+a527JRgO7duwPp893tXpYvX+5idlSS/372uupsB8rgIgFTAxcJWM5LdH+8MGm2oMTKJoCr\nrrpqs6/3O18K9QB3yY6Vu/44s5XD/lLO8v6d/Q4u+11/+WnXrl2B1HJjgAEDBmR8hnWa+Qta3n//\nfUCdbCKSoIKbyVYVtrnDBRdcUO7rbL+tU0891cVmzpxZbfcl1c+Gq/zlotYZNnDgQBdr3bo1APPm\nzXOxuOEvy6p+p53NX/cP4rAOMn8HVRuqs1mUED9r7cefXx2UwUUCpgYuErCiL9H9LZf9pYLl+eKL\nL4D0ThApblbm2vlhAHXr1gXSTw09+eSTAXjuuedczBajrFq1ysVs5pnt2AKpM8lsTBtSnWbff/+9\niz3xxBNFlWJbAAAgAElEQVQAPPPMM5u9z1xRBhcJWM73ZPvmm2/cte0+6fOX2Jl7770XSP9LbCo6\nuiiODj4oTFV5vmw46+KLL3ax3//+9/a+LmaHFnz66acuZktIJ02a5GJ2QMK5557rYjYM67+fda6N\nHTvWxWz/tVmzZrlYVYZhtSebiMRSAxcJWM472fzDBvw90cyLL74IxJfb2S4QiGMHJUiYrLPrgw8+\ncDHrNPMXh9i1vyzZDjKYMmWKi9l+gbZ/H8Q/X++88w4Af/nLX1zM5lTk+pCDOMrgIgHLeSdbRYcS\nVPaIF7+TzWYn+RvYn3POOUD6ETRxhyVUhTrZkpHE89WkSRN3bTvqnnXWWS5mnXH+cx93uqjt/uu/\nzjpnp06d6mJ2PrhfIfq7qZqqDI+pk01EYqmBiwQs5yW6b++99wbgqKOOcjEbx9ySEv2Xv/wlkL6p\nfS6oRE9GEs+X/zw0a9YMgL/97W8udsABBwDpcyFsXNtfymnPn/91z77m/etf/3Kx9957D0ifBWdj\n3knNWlOJLiKx8prB49jG9dY5BqlNIvzldza7zZ9VZHPMc730Uxk8GUk/X/ZsNG7c2MWsQ3fnnXd2\nMTtYY4cddnCxzz77DIBp06a52Jo1a4D0TtotGc6tLGVwEY
mlBi4SsIIr0YuRSvRkVNfzFXegQVzM\nL60LYRaaUYkuIrGKfsMHkYr4VWquN1zIN2VwkYCpgYsETA1cJGBq4CIBUwMXCZgauEjA1MBFAlbZ\ncfCFwIzquJEi1rHil0iW9HxlqtLzVampqiJSXFSiiwRMDVwkYGrgIgFTAxcJmBq4SMDUwEUCpgYu\nEjA1cJGAqYGLBEwNXCRgauAiAVMDFwlYpVaTaV/0eNoXPRl6vuJpX3QRiaUGLhIwNXCRgKmBiwRM\nDVwkYDqbTKSS/JNJC33LM2VwkYCpgYsETCW6/KRstVVpTqvKkcKFXpb7lMFFAlZwGXzXXXcF4Jln\nnnGx7bbbrsrve+CBB7rrL7/8EoBZs2ZV+X2lcNWoUQNIZW2AunXrAtCjRw8XW716NQDffvttRqyi\nbG3vXatWLRfbsGEDAD/88MMW33tSlMFFAqYGLhKwgivRDzroIADq1KmT6Psefvjh7vqMM84A4IQT\nTkj0MyT//DFqu27Tpo2L3XzzzQDsvffeLjZmzBgA/vCHP7jYzJkzgfgS3S/5mzRpAkCfPn1crEWL\nFgA8++yzLrZp06bK/qckQhlcJGAFkcFr1kzdxiGHHFItn/Hhhx+660svvRSABg0auNiqVauq5XMl\nt/yMa5n23HPPdbGhQ4cCqY4wgHXr1gGwcOHC2Pcpj3XWjRo1ysWsw61Lly4uNnv27Oz+AxKmDC4S\nMDVwkYAVRIm+7777uuuBAwcCcNtttyX6Gc2aNXPX3bt3B6B+/fouphI9PNZRe9RRR2XE3n77bRe7\n8sorAVi7dq2LxZXo1mnnd7J17twZSP+6Z6+75JJLXOyyyy7b7PtWJ2VwkYDlNYP37NkTgMcee8zF\nbDbR73//+0Q/68gjj0z0/aTwde3aFYDtt9/exTZu3AjANddc42Jr1qzJ6v0s+/pDXu+//37azyB9\nqO7Hv5tryuAiAVMDFwlYXkt0K5P8Dgobp1y5cmUin9G8eXMA9tlnHxcrhEUAUj3q1avnrp944gkA\nateu7WIvv/wyAF9//XW571Neme3/zL4G+GW7vc4fG7ffUSebiCQm5xn8uOOOc9c2a23q1KkuNnHi\nxEQ/7+qrrwbSs/a4ceMAWLp0aaKfJfl3zDHHuOtOnToBqY41gLPOOguoeG54eZnWn3l5xx13ZMS+\n//57IH32mjrZRCRxauAiAct5iT5s2DB3bTPJ7rrrrkQ/w98B5qSTTgLSS7Lhw4cD6QsOpDDZriwV\nldTWkbbnnnu6mP3OU0895WJLlizZ4nuxjjJ/TkWHDh3SfgbwwgsvAKklp5Ca/eZ/VcxFx5syuEjA\ncpbBbWH8gAEDMn42YsSIRD/rnHPOcddbb701kNqHDeCNN95I9POk+mS7UULjxo2B1OxISK0vePXV\nV12sKtnSloGefPLJGe83d+5cF7N1FHEVYq4PTVAGFwmYGrhIwHJWotsyvXbt2rmYv8gkSbaEz/fZ\nZ59Vy2dJ/vjlro1D+1sf9+7dG0h1hEGqsyvb0t//DHsfm70GqRmXttcbpMa/C+EMM2VwkYDlLIOv\nWLECgMmTJ7uY/YW1+eIAixcv3uLPaNWqFZA+W86MHz9+i99XCpOfIa1Dq1+/fi5maxwuvPBCF3v9\n9dcBeO+99zJ+N24/t2233dbFTj31VCD9ebX93Pzny6qD8jaN2NzPk6YMLhIwNXCRgOWsRLddM/xO\nkGOPPRaA0aNHu9if//znrN7PxjttQQGkZrDFlT5aIhoe/9902bJlQPqsyDvvvBOApk2bZsRefPFF\nF7Nr/3CMwYMHA+kLkmyLZJtbAfDdd99lfEbcUlOLxf2sOimDiwQsqswX/SiKqtwr0K1bN3d94403\nAnDooYe6WLZHFtkm9f7921/WuL+SjRo1ctfZ7sGVrZKSktz+WQ5UVZ4vm2Vmu/ICPPzwwwC0bt3a\nxezUUH95Z9yQmWVuf2
MIm4Vps+YA5syZA6TvDBy3mURcBs+2qqzK86UMLhIwNXCRgOW8RI/jn8zo\nn+dUHn8JoHnooYeA1BJRn1+SJU0lejKSeL78r3g77bQTAAcccICL2aIQf7cVO2XWn6Px0ksvAalF\nUpDaz80v+e197DANSM35SIpKdBGJVRBHF/l/Of3ryrIhizj+MkLNSw+XzSwD+OSTT4D0f2+rWP0O\nrgkTJqT9DFL7uPnHFM2YMQNIzZgEWL58OVC4m4cog4sETA1cJGAFUaInpbzZQirLfxrixpkrGm9e\nv379Zn/md9rZPAv//SZNmgSoRBeRPAgqg1snSb4W10v+Jf1vbzPkIDULzv8M2znV74zLdjOJXFAG\nFwmYGrhIwIIq0evWrZsRS3phifw0WGedX3q3adMGgLVr17qYzWTzzz8rJMrgIgELKoOffvrpQPoi\n/ZtuuilftyNFzDK432E2bdo0IH3ozN84ohApg4sETA1cJGAFsVw0KXaqo7+vWy7OIdNy0WTk4vkq\n70TPuBmQfidb//79gfRzyBYsWACkz2SL24a5KrRcVERiBZXB80UZPBmF+Hz5Wd2yud9mcrFbrzK4\niMRSAxcJWFDj4CJJ88txGxPP9eEFVaEMLhIwZXCRSiqm5cjK4CIBUwMXCVhlS/SFwIzquJEi1jHf\nNxAQPV+ZqvR8VWqii4gUF5XoIgFTAxcJmBq4SMDUwEUCpgYuEjA1cJGAqYGLBEwNXCRgauAiAVMD\nFwmYGrhIwNTARQJWqdVkhbjrZSHQrqrJ0PMVT7uqikgsNXCRgKmBiwRMDVwkYGrgIgFTAxcJmBq4\nSMDUwEUCppNNRMphRwb71/7ZZLYr8caNG3N7Y1lSBhcJWFAZ/PDDDwdg1KhRLnbhhRcCcPfdd7uY\nnRIpUpG6deu669GjRwPQp08fF/v0008B2H///V1sw4YNObq7iimDiwRMDVwkYJU6uqgQV/u0aNHC\nXU+ePBmA9u3bZ7yufv367nrNmjWJ3oNWkyWjEJ+vZs2auetp06YB0KhRIxd78803gfQSPenjwLSa\nTERiFX0n29577+2u4zL3Y489BsDatWtzdk8Sjh49erhrqwIXL17sYscddxyQfNZOijK4SMDUwEUC\nVrQlep06dQC4+uqry33dyJEjgcItoaQw2fj3dddd52I2g81/llavXp3bG6skZXCRgBVtBu/VqxcA\nffv2zfiZPy/4pZdeytk9SThq164NwO677+5iNhf9rrvucrFC77xVBhcJmBq4SMCKtkQ/9thjN/uz\nV155JYd3IqHwl4FecMEFADRo0CDjdc8880zO7qmqlMFFAla0GdyfwWbWr18PVDx0JhLH39zh/PPP\nz4jNmDEDgK+//jq3N1YFyuAiAVMDFwlYUZXoe+yxR+y1WbVqFZBaNipSGQMHDnTXrVu3zvh5MZXm\nRhlcJGBFlcH79+9f7s9HjBiRozuRkFhH2qWXXupitWrVAmDlypUuduuttwKpztxioAwuEjA1cJGA\nFVWJ3q9fv4zY0qVL3bVKdNkS1qHmz61Yt24dAB999JGLvfXWW0BxLT1WBhcJWFFk8EGDBgFw4okn\nZvxs2bJl7nr27Nk5uycpbv4MtY4dOwKwcOFCF7OOtBtvvNHFivHADGVwkYCpgYsErChKdDvcwC+r\nzH/+859c344Epk2bNgC0bNnSxWrWLG0an332WV7uKSnK4CIBK4oMbpvL+2x47J577sn17UgALEMD\nDBkyBICmTZu62KuvvgqkD8MWI2VwkYCpgYsErGBLdP+csbjxbxvznjhxYs7uSYqf7btmi0kAhg0b\nlvG6uXPnArBhw4bc3Fg1UQYXCVjBZnB/Q4e44bHnnnsul7cjgfn1r3/trq1zzTYMAfjd734HFOfs\nNZ8yuEjA1MBFAlawJbrNXvP5iwH++te/5vJ2JBD2da9evXouZh1p77zzjovNmjUrtzdW
TZTBRQJW\nsBn8oIMOyojNnDnTXfvLREWy9cMPPwAwfvx4FzviiCMAGD58uIsV06YO5VEGFwmYGrhIwAquRLcZ\nRp07d874mX/YerHPMJL8sE62o446ysU6deoEpM+9mDBhAlD8pboyuEjACi6DWyeIP8e8Z8+eAEyd\nOjUv9yThqFGjBpD+LK1ZswaAAw44wMUeeOABABYtWpTxu/aM/vi6ECmDiwRMDVwkYAVXotvk/quv\nvtrFrKPjww8/zMs9SThsO+RPPvnExaxD7YorrnCxFStWAOkLnezZLKaON2VwkYBFlflrFEVR8fzp\nyqGSkpIo3/cQglw+X7bxg69QM3NVni9lcJGAqYGLBKzgOtlEcqFQy/GkKYOLBEwNXCRgauAiAVMD\nFwlYZTvZFgIzquNGiljHfN9AQPR8ZarS81WpiS4iUlxUoosETA1cJGBq4CIBUwMXCZgauEjA1MBF\nAqYGLhIwNXCRgKmBiwRMDVwkYGrgIgGr1GITbboYT5suJkPPVzxtuigisdTARQKmBi4SMDVwkYCp\ngYsETA1cJGBq4CIBUwMXCZgauEjAdDaZ/CTVqFHDXW/atGmzsWKnDC4SsILI4K1atXLXTzzxBADv\nvPOOi917770ATJ8+PdHPbdKkibvee++9ARg7dqyLbdiwIdHPk9zaaqvS/NW9e3cXu/766wH44Ycf\nXMyuhw8f7mJff/01sGXPQM2apc1qxx13dLEpU6YAsHHjxozPrU7K4CIBUwMXCVheS/RmzZoB8Pnn\nn7uYlc3z5s1zseoqzT/88EMXa9myJQB9+/Z1salTpyb6uZIMK4H9cjdOixYtAHj11VddrGnTpgDM\nnTvXxS655BIgVZZD5UvzevXquevLLrsMSO+0e+655wD44osvXGz9+vVA6qsEJF+2K4OLBCznGXzr\nrbd2148//jgAzZs3d7G77roLgIsuuqja7uGaa64BYPvtt3exc889F1DWLgZxmduyoHWWAtxzzz1A\nemeqZWZ7BiCV4SuqCOJEUeleDB07pg4BHTRoEADr1q1zsa5duwKp5wxSGbw6O9uUwUUCpgYuErBK\nnQ+exJ5ZBx54oLt+6aWXMn6+zTbbALBgwYKqflSaHj16uOtPP/0UgGeffdbFTjvtNABWrFhR6ffW\nnmzJqOzzZeUxpDre/Geqd+/eADRo0MDFxowZA6T+vQFWr14NgN8W/Pc2cW3Fvhp06NDBxV577TUA\nxo8f72I333wzAN988025/01xtCebiMTKWSebzVY79thjM3525plnuuvqytz+UInxM/iWZG7JLz/L\nWuXnzx6zDjV/yPXhhx8GYO3atS4Wl5mzrWwtg/vP9Z133glA3bp1XWzatGlZvV/SlMFFAqYGLhKw\nnJXof/rTnwA4+eSTXcxmkj355JPV9rl77bUXAK1bt3axBx98EEiVa1Kc/Blghx12GACNGjVysRkz\nZgDpMyFt0UdVWIcepDrrBg8e7GLvv/8+APfdd5+L5WvhkjK4SMBylsGt08KftfO///0PSM3oqSqb\nD3zVVVe52Pnnn5/2+QBnnHFGIp8n+eXPHluzZg0As2bNcjGbd75kyRIX69KlCwDfffedi8V1qMXN\nLqtVqxYAxx9/vIvdcMMNGa+/++67AZg/f35W/x32vpB8plcGFwmYGrhIwPK6XPTQQw8F4JVXXnGx\npUuXAjBixIis3mOfffZx19bRMWDAgIzXPfXUU1t6m1JgrHNt0aJFLvbzn/8cSC0RhVRnmL9s86ST\nTgLgkEMOcTHrhHvrrbdczMpm66QF2H///QHo16+fi9WvXx+Ab7/91sW+//57IH42XJzq7IBTBhcJ\nWM7mottGCrbwHaBt27ZxnwFkP5PI/ysZ9zvWmTJ06FAX8//aJkFz0ZNR2eerTZs27vrRRx8F0rO1\n8ZcF23Jlv2PLX9bp3QuQPuOtcePGGZ9hu6/6c+D/
3//7fwCsXLnSxaqSpTUXXURiqYGLBCxnnWw2\na82W8AH06dMHSC+fL7/8ciB90clDDz202fcdOXKku/74448zfm7bLyddlkv+HXPMMe76q6++AtL3\nPLM9//xtk3fbbTcgtQcfpEp0G0uH1Gy0XXfd1cVsZxj/q6DtAPTAAw+4mHX0+Z9hs+r8OR+V+Xq8\npZTBRQKW8w0fktapUyd3bX9NJ0+e7GIHHXQQkPwyVJ862ZKR7fNlyzBtphrEL/e1Pdb8Y4hs6Myf\neWbDbn7Hm7GlnwCHH354xs+t4vQrSeuE8+fKx3XaZbsHnDrZRCSWGrhIwAribLKquPbaa921fd34\n7W9/62LVWZpLfliZ6y8isc6rir5yllcW++WzzdGw/dogNa5tHXqQ2vrbf18b8/ZLdCv/c3EemU8Z\nXCRgRZvBhw0bBqRmDUGqo8WfoyzhSnrIyd/IwXZJ9eedN2zYEIifoeZ/vl37McvwyuAikhg1cJGA\nFW2JfvDBB2fEXnzxRQAmTZqU69uRPNiSstzGo/0FI3btL16xr362AwykvhLY7Eg/VtGuMHGLYHJB\nGVwkYEWfwVetWuVitnOriM9fUlynTh0g/cRRy679+/d3sV69egHpmdmGzMaOHZvV5/pZW7uqikji\n1MBFAlZUJfp5553nru0gA39rWnWuSUWsU8z2/gMYOHAgAJdccomL7bzzzkCqpIdUme3PbrMS3p+1\nZuL2ZKtdu3bGvVQnZXCRgBVtBre/nKNHj854nX98jS36nzlzZjXfnRSqioaw7NrfoMHmjvudY7aZ\nhB+zLB23N6DtuAqpWZa5yNo+ZXCRgKmBiwSsqEr0OP5uHbapvd9Z8vnnnwNw6qmn5vbGpCDFdYq1\natUKSJ9TYfuz+QuXxo8fD8DChQvL/Qwb//Z3mcnF/mtxlMFFAlb0Gfyss85y12eeeSYA999/v4vd\ndNNNOb8nKXx+5bd8+XIgfXMQG87y550///zzQPx+bv6stbjlovmiDC4SMDVwkYAV1bbJgwYNctc3\n3ngjkH4ipJ1IGrdXV3XStsnJyMXzFTdubZ1sdm4ZpMp1f8abLSe1Qwx81VmOa9tkEYlVVBm8UCmD\nJyPfz1dFJ9XmizK4iMRSAxcJWNGPg4tUho1bx21fXEhleVKUwUUCpgwuPykhZunyKIOLBEwNXCRg\nlS3RFwKZ03h+2jrm+wYCUu3PVxGW6FV6vio10UVEiotKdJGAqYGLBEwNXCRgauAiAVMDFwmYGrhI\nwNTARQKmBi4SMDVwkYCpgYsETA1cJGCVWmyS703xCpU2XUyGnq942nRRRGKpgYsETA1cJGBq4CIB\nUwMXCZh2VRUph3/ut3+meLFQBhcJmBq4SMBUoouUoxjLcp8yuEjAlMFFKsk/R9wU6vbjyuAiAVMD\nFwlYwZbojRs3dte33HILAD179nSxIUOGALBhw4bc3pgUnZo1U4+5ldL+81WrVi0AGjZs6GIbN24E\nYO7cuRkxX6GW5kYZXCRgBZfBTzrpJABuvvlmF9t2220zXmd/gRctWpSbG5OiVbt2bXf9f//3fwAc\ndNBBLrbNNtsAsH79ehez58qqR4Dnn38egCVLlrjY2rVrq3x/W22VyrM//PBDld8v7b0TfTcRKShq\n4CIBq9TxwdW1pU779u3d9UcffQRAixYtXCzuHh9//HEALrzwQhdbvHhxddxehbRlUzKSeL78Meqt\nt94agIcfftjF9t9/fyB9EYnxy2Mrvf1nyt7ntddec7EPP/wQSC/bk6Ytm0QkVkF0sl122WXuunnz\n5ln9zvHHHw/A0KFDXcw65v72t7+5mN9xIuGrU6eOu77zzjsBGDhwoItZ5varQsvW/rNir/Pnou+3\n334AdOzY0cUWLFgAwNKlS12skIbOlMFFAqYGLhKwvJboVuqcfvrpGT/75JNP3PW8efOA1Ow1X5Mm\nTdy1lfqPPPKI
i/kzkSQsDRo0cNc2o7Fz584u9v333wPp4+A2G80vqceMGQPA5MmTXeyAAw4A0svx\nTp06AVC/fn0Xmz9/PlBYZblPGVwkYHnN4H369AGgUaNGLvb2228DsM8++7hY3bp1Afj5z3/uYldd\ndRWQ/hfbZiTZjCOAgw8+GMjfEJpUn1WrVpUbmzVrFgDLly93Mcvg999/v4vZtT8r0mZK+s+XPYc2\ndx1SnWyFShlcJGBq4CIBy2uJbmOWfgfFX/7yl4zX2TjlP//5TxcbNmwYkOr48K1evdpdaxz8p2X2\n7NnueubMmUD687VixQoAxo0b52LW4ebPZPv6668BaNmypYvZ2Lhfyhdq55pRBhcJWF4zuN9pZg49\n9FAAnnvuuXJ/t1+/fpv92XvvveeuV65cuYV3J8XIz6jWAeZnZuvQnTNnjotZx5u/McTgwYMz3tsy\n+KRJkzJ+Vqj7tCmDiwRMDVwkYHkt0R977DEAjjjiCBfr378/AN26dXOxXr16AXD00Ue7WLNmzYD0\nGUkWO/vss11s5MiRAHzxxReJ3rsUJr8c33fffYHUcwGp3VNsIQrAlVdeCaSePUjNYPMXr1gZ7o+D\nb7fddkD6eLiV/GvWrMm4P79st/erzlJeGVwkYHnd8MGWhk6dOtXFbG6532kRd4+vvvoqABdccIGL\nvfjiiwDssMMOLvaPf/wDgPPOOy+p286gDR+SkfSGD7b/2mmnneZiNo/cr/yWLVsGpA9/2fCqXxF0\n6NABSH9eba3DG2+84WKjRo3KeD9bdlqvXj0XW7duXcZnxNGGDyISSw1cJGB57WSzBSA/+9nPXOyp\np54C0peBGn+nlt/+9rdA+ra1zzzzDABXXHGFi9n2uP6igW+//bbK9y6F7/LLLwfSZzNah65fFttc\nCX8Z6D333AOklpxCalmzv0zV9hPs3bu3i40ePRqIP5k0ruPN/1ph10ltn6wMLhKwgtiTzTrMAI47\n7jgATjzxRBezDpFrr73WxeI2nL/pppsA2GmnnVzM/mL7v3vqqacmcdtSgPwOWevE8v/tbYjLdleF\nVKYdPny4i7300ktA+nFFVlX+7ne/czHrNPvf//7nYvY7cR3FcTH/npMeMlMGFwmYGrhIwAqiRPdZ\nue6X7dmyDgw7FAFSJbrNaoLU+Lt2eQmblbtWqkNqroQ/G80WNo0fP97F7CugX1J/8MEHQGrcHFIl\neuvWrV3MSnl/JxnbM84/0MNmv1U056MqlMFFAlYQRxclzT+t0Y6bsYMSAG644QYAbrzxxkQ+TzPZ\nkpH082WZ0T+maI899gDSO89mzJgBpC8hNX77sN1Z/f0CrTPYf79bb70VSB8Ss2zuv87eu6ITSjWT\nTURiqYGLBCzIEt1nWzNPmDDBxWz7W3+8/Jtvvtniz1CJnozqer78Et06w/zZbVY+W0cYxHd22Ve/\ntm3bupgdkGDLRiE1C80vvW0rb/8gDn/BS3lUootIrOAzuPn1r3/trm+//XYgNXcd4JRTTgHi5wpX\nRBk8GdX1fMXN9Y577itqC5bB/bno3bt3B2D33Xd3MasE/DPDbX667eoKqT3g/I63OMrgIhJLDVwk\nYD+ZEt3fwN463Lp06eJi1hnnn2qaLZXoySj058vGwf2z9Ow8vJNOOsnF+vbtC8DYsWNdbMSIEUDF\nY95xVKKLSKyfTAb32d5a06dPdzHb4dX/S5wtZfBk5Pv5iuuMi/u5v+ffwoULgfQhNtvFdcCAAS72\n7LPPAunDc9m2PWVwEYmlBi4SsJ9kiW5eeeUVdz1w4EAgfTwz28MSVKInI9/Pl79IyZaT+mPUtszY\nP8PMyvZVq1a5mHXY+ocr7LXXXkD2s9d8KtFFJNZPOoM3btzYXX/88ccAXHzxxS5mG9hXRBk8Gfl+\nvvyONTuyyN/d1NpKu3btXMw2bejZs6eLTZw4EYjfVXVLKIOLSCw1cJGA/aRL9K
SoRE9GIT1ftsTU\nbx9xhxHYnmx+Z5yNiSe115pKdBGJpQyeAGXwZOj5iqcMLiKx1MBFAlbZgw8WAjOq40aKWMd830BA\n9HxlqtLzVanv4CJSXFSiiwRMDVwkYGrgIgFTAxcJmBq4SMDUwEUCpgYuEjA1cJGAqYGLBEwNXCRg\nauAiAVMDFwlYpVaTaUF+PG34kAw9X/G04YOIxFIDFwmYGrhIwCq7o4tIUbPtkJM6daQ8/llncVsu\n54IyuEjAlMHlJ8UyadLZ1U4jhdQhB/nK2j5lcJGAqYGLBCzIEr1Zs2buukOHDpt93YwZqR16L7nk\nEgA+++wzF/vmm2+A1NHCknt2vteW7P5bv359ANasWeNidevWBVLnh/mx9evXu5idNeaX3lZy+/dS\ns2ZpE9p///1drGnTpgDsscceLjZixAgAlixZ4mKrV68GYPny5S6W9C7HyuAiASv6s8kOPfRQd33E\nEUcAMHjwYBfr0qXLZn/XMjRAx46l+8vbwe8+G1rZHE1VTUbSz5dl/9q1a7uYDY/5z71d+x1vlrn9\n55/2844AABDLSURBVKFx48YAbL/99i7Wr18/AM444wwX69y5c9rn++/dq1cvF7Prp556KuNefJqq\nKiKx1MBFAlYUJbqVPBdccIGLnX322UDqAHZIL4mSpBI9N6rr+fJLb3ve/WfFOtn8DtlddtkFgIED\nB7rY2rVrAejbt6+L9ejRA4BGjRplvJ//ucbvZLOvhevWrXMxv6PPu2eV6CKSqSiGydq3bw/AxRdf\nnMj7ffXVVwB8/vnnibyfFA4/M9u1DZf5MasKIfVcWTYGWLp0KZAa8gKYM2cOkJ6Z33//fSD1jPrX\n/udaVp8+fbqLWTURl7WTogwuEjA1cJGA5bVE33rrrYH00nvChAkAjB071sWsE2LZsmUutmrVKgAa\nNGjgYq+88gqQPhvNSqiPPvrIxWxmk72HFD8rm3fdddeMnw0aNMhd27/5sGHDXGynnXYCYMGCBS42\nZcoUAF544QUXs1lrVm4DzJo1C4DZs2e7mHXM7bnnni5mnXBPP/20i61cuTLjXqsycy+OMrhIwHI+\nTOZn3LfffhuAnXfe2cWOPvpoAEaNGpXxu9ttt527ts4Kf2jD/ormepmehsmSUdnny+9Qa9euHQD3\n3HOPi9mMs/fee8/FXn75ZSC9Q81mqD344IMuZs+XP4/dhkv9YVO7tnnlviZNmrhr63CbO3eui/nD\nY+XRMJmIxFIDFwlYzjrZbML/o48+6mJWmv/+9793sVdffXWz7+GPIZqZM2cmdIdSzGy8umvXri5m\ns8asLAcYM2YMAC+++KKLWeeZX47bclGfjVfb633+Hm/2c5v5BqkONfs6ALBw4UIgvUNNnWwikrVq\nzeANGzZ011deeSUAhx12mIvZX7A//vGPLhbXWSESp1WrVu7aNlTwO3HHjRsHwHPPPedicR1bljX9\nTjvLoHHZ1c/WcZnWfu539tqaiRUrVpT7u9rwQUSypgYuErBqLdGPOuood33FFVcA6Z1ie+21F5A+\nQ00kW/5Xu+7duwPpu7dYh25F4802C84v0a3M9t/PXucvDimvzPY/1zrc/LI9F4cwKIOLBKxaM7i/\nq6Tx54T783dFshU379w6dP0svHjx4qzeL24TCJt55m8oYtnXH0KL+93yNh6J2/fNrwKSnoWpDC4S\nMDVwkYBVa4l+3HHHZcSGDh3qrq+77joAnn/+eRebPHlydd6SBMBKWn/Rh80e8zusBgwYAMCXX37p\nYlYC+7ut2O/6pbIdjNC8eXMXs5La/wxbfhpXlvvbK1vnctzhCtW5OEoZXCRg1bpcNNvOA/9nd999\nN5C+xM+WhE6dOtXF4vZTsyWA7777rovloiNPy0WTke3zZUtDTzjhBBf7wx/+AKQ/c9bJdu+997pY\nixYtMl5nHV/+3HHrDB4yZIiLffrppwD85z
//cbHvvvsOSK8IbDadfzySZX0/ZjM544bnfFouKiKx\n1MBFAlatJfrtt9/uri+99NLK/GqV+Htr2YIDv5xLmkr0ZGT7fNnpsf4y0B122AFIn3lmu6dYWQ6p\njjJ/yaeVyP5CJyvb/dNFraPszjvvdDHblcj/DDuv7Mknn3SxuN2GypvB9qOz01Sii0imas3g/jCG\nHQXjb/hgf0W33XZbF4s77qUq7L/v+uuvd7Hhw4cn/RnK4Ako7/nynyU7tMB2QwXo2bMnkNqpF+CT\nTz4B4KyzznKxHXfcEUjvFLNM6md1u/afR+sg84dy7YRaf8ab7eR73333uZgtE43rePY/I64zWhlc\nRGKpgYsErCBOF91///3dtXVq+CV1//79q/wZ/jbMtjVzUlSiJ6O858svn62TrU2bNi5mBxX4HVdW\n+nbq1MnFjjzySCC9M86WLfsdZVb++2PUNgvN37vNPs/OLYPUDE47FOHH92WyncGmEl1EYhXE6aKv\nvfZaRqxPnz7u2jK4v0zvn//8JwD/+Mc/XOxXv/oVACeeeGK13Kfkj/9vb5nPOrgg1QHmZ0XL4H52\ntRmSixYtcrEPPvgASF/ebJ1m/h5vdg/+/PTydkGN25k16T3XKqIMLhIwNXCRgBVEiR7HTgoFuPnm\nm4H0jpazzz4bgC5durjY4MGDN/t+2j0mHHagQUXsefGfmy+++CIjZks+t9lmm4yYP/5up4r6s9ts\ngYqdigswb948IPfleBxlcJGAFcQwWRx/ZtADDzwAwM9+9rOsftcfkhg9ejQAJ598soslfS64hsmS\nUdnny8+k9m/uD2vFzTu3Tjg7rxtSWdpmuQHceuutQPpsOXs/vyNv0qRJQPqBHrZLsP+5cR1u2dIw\nmYjEUgMXCVjBdrL5s4VsfNs/68yW5PnnU9npoyNHjnQxf0achMXfHSVO3PliNja+fPlyF2vdujUA\nu+++u4t169YNSJ/xZjPZ/N2GzjzzTCD+a59flid9ami2lMFFAlawnWwVOeWUU4DUzpkAN9xwAwDz\n58/P6b2oky0Z1fV8+csxLZP6HXTG5qRD6ux6fx67zXh77LHHXMyOJ/KH05I+ikidbCISSw1cJGBF\nW6IXEpXoycjl8+WPl1sbiNtNyB/LttdV1LmXNJXoIhKrYIfJRKpTXOXqz1CzDO8fNZT0foG5UHx3\nLCJZUwMXCZhKdJEYFZXw5Yk7aTRfS0eVwUUCpgwuUklxQ2y+QtjowSiDiwRMDVwkYJUt0RcCM6rj\nRopYx3zfQECK4vnKcQlepeerUlNVRaS4qEQXCZgauEjA1MBFAqYGLhIwNXCRgKmBiwRMDVwkYGrg\nIgFTAxcJmBq4SMDUwEUCpgYuErBKrSbTvujxtC96MvR8xdO+6CISSw1cJGBq4CIBUwMXCZgauEjA\n1MBFAqYGLhIwNXCRgKmBiwRMDVwkYDqbTKSSttoqlRezPXE0jp1xVp1nEyiDiwRMDVwkYCrRRcr4\nxwLXrl17sz/3S3SLrVmzJuP1fukdV4bb+2zatGkL77hiyuAiASuqDN6lSxd3vfXWWwNw9NFHu9jg\nwYOB9I6Pu+++G4AJEya42NSpU6vzNqXA+JnZrmvUqOFiPXr0AGDfffd1MXu+atWqlfG722yzjYt9\n//33AHz11VcuNmXKFAA++ugjF7MMX1EHXdIdb8rgIgFTAxcJWKXOB8/lljo9e/Z01xdeeCEAxxxz\njItZCZWtjRs3uuuvv/4agPHjx7vYxRdfDMD69esrfa/asikZVXm+rLT1y3Erh+vVq+di22+/PQBX\nXnmlix122GEA1KyZ+sa6cuVKAJYsWeJizZs3B9LL9nXr1gGpUh1Sz9fo0aNdbNq0aQD897//dTF7\nJv1SPa49assmEYlVEBm8d+/e7vqCCy4A4Pjjj3exxo0bZ/yO/cV8++23Xcz+Sv7mN79xsQ8//BCA\n3Xbbzc
XmzJkDpGf1W265BUh1ylWGMngyKvt8+R1l9hxblgVo3bo1AIcffriL/epXvwKgRYsWGe+z\nYcMGF5s7dy4An332mYtZ9m/VqpWLrV69GkgfJrPsv2DBAhd76qmnABg1apSL2ectW7bMxeI63pTB\nRSSWGrhIwPJaot9zzz1A+lh2XOfZa6+9BsCnn37qYldddRUAa9euzXj9G2+84a5/8YtfAPDAAw+4\nWJ8+fQCYN2+ei3Xo0AFIH+P0S6zyqERPRrbPl3Wk+Z1iTZs2BeDaa691Mes8a9KkiYvVqVMHSB+P\nnj59OgD33Xefi7377rsAdO3a1cWOPPJIAOrWretin3/+OZD+3FoH8U477eRir7/+OgC33367i33w\nwQdAqqMO1MkmIpWQs5ls9lfP7wA766yzgPShDcuaI0aMcDH7q7dq1aqsPiuuA+X66693sbFjxwLQ\nsWPHrO9fCodlOT+TWnY98MADXaxdu3ZA+lzv7777DoB//etfLvb8888DqY41gAYNGgDpHbH2LM2f\nP9/FrFPPz9ZHHXVUxv1ZJ6//DFvm9p//pJeOKoOLBEwNXCRgOSvRbSHI5Zdf7mJWmvizgI499lgg\n1QFREX8sdNtttwXSy68xY8YA0KxZs4zf9UujkSNHArB06dKsPlfyz5+htueeewJQv359F7Py+ttv\nv3Wxt956C0h1okFqPNrvjNthhx0A2HXXXV3Mvj526tTJxazDtlevXi5mpbmNkUNq1qTfoWaqsitM\nRZTBRQKWswxumTZucbvfkbH77rsDcNxxx7lYt27dMn7HZg75nRt2vXDhQhez2Uxx/GGy4cOHA+mz\nmaQwWeXl/1s98cQTQHrGtaEwfyjVhrD8ITZjM9Ug9dz4z6ZlZn8mm8X81y1fvhyAGTNmuJjN0NyS\ntQ5VoQwuEjA1cJGA5Wwmm3WIPProoy42ZMgQIL1jxMqquPvyy3u/cy0bfkfGs88+C8Avf/lLF7MF\nKFtCM9mSUdnna9CgQe7aZqP5y0BtHNxmr0FqVyB/AZMtCfWfEXsO/cUrNlvOXy5qv+N/LbQOYuu4\nBXjppZeA9OfaFqVURDPZRCRWXuei21/EK664wsVsuGPRokUuNnPmTCD9L/HOO+8MpC8DLY+/DNTm\nsSc1JKYMnoxsny/rIPPnkxt/9pgNjfpDpFa1+VnYnrVGjRq5mP186NChLmbZ3B9etSxss+EA/vSn\nPwHpWd1mv/mdcdm2PWVwEYmlBi4SsILY0WVL2Gy1k08+OeNnK1ascNeXXnopAA8++KCLJb3RvEr0\nZFT2+YpbpOHHrCPWH/O2r3n+GLqV+v6ST/va2LdvXxez2W1+6X3vvfcCqRlyAJMnT864V/u8LXn2\nVKKLSKyiOvjAX2p6wgknbPZ15513nrt+7LHHqvWeJH/iqk8/g8dlS8uk/u9ahvdnvFknm5/9Fy9e\nDKSGWSG11sGG6Tb3GdU537w8yuAiAVMDFwlYUZTotvPLNddc42JxiwVsf6xnnnkmNzcmBSeu9N7c\nz411vPkLnPbee28gfbzclp0+/vjjLmZzNPxloNV5WmhlKYOLBKxgM7g/Q81mBjVs2DDjdf58Xutc\ni1tULz89fvY1cSeE2lDYKaec4mK2r5+/h5p1qPm7+8Z1qBUSZXCRgKmBiwSsYEt0/zwpfxGAsdLp\niCOOcLEJEyZU/41JQfNLZds9xd+7zWat7bXXXi529dVXA9CyZUsXs3FrOykUUnMq/F1ZCrU0N8rg\nIgEruAxu2dqftRbnkUceAWDcuHHVfUtSpCwL+8tKreI788wzXcz2X/OHt1555RUgfWjWlnwWetb2\nKYOLBEwNXCRgBVGi++PbX3zxBRA/hvnJJ5+4azvIXWRzrJRu06aNi51xxhkA9OvXz8VsWak9ewB3\n3nknkL5XX74WjFSFMrhIwAoig++3337uun379kB8R8Yll1ziruPOBReJ
4x+cYZnbrxDtAAw7rx5g\n4sSJQHrHm78UtVgog4sETA1cJGAFUaLfdNNN7jquNL/99tsBeOONN3J2T1Kc/DLaDtSw5wdSs9r8\n140ePRqAp59+2sXsOYxb+hm3F1yhUgYXCVhBZHD/eBj762izhgDuuOOOnN+TFCe/88zOpPfXKNjZ\n3rNnz3axUaNGAem7pdrQWUVDY7aphH+gQSFRBhcJmBq4SMAKokT/85//nHHtd7xV5eRP+WmoXbs2\nkF4q//e//wVS591BaraanU8H8PLLL6e9B6R2BaqoQ80+zz/tVnuyiUhOFO3RRYVERxclo7qer4pm\noBX6UJeOLhKRWGrgIgGrbCfbQmBGddxIEeuY7xsISLU8X4VeglegSs9Xpb6Di0hxUYkuEjA1cJGA\nqYGLBEwNXCRgauAiAVMDFwmYGrhIwNTARQKmBi4SsP8PSoKR2UX/FhIAAAAASUVORK5CYII=\n",
360 | "text/plain": [
361 | ""
362 | ]
363 | },
364 | "metadata": {},
365 | "output_type": "display_data"
366 | }
367 | ],
368 | "source": [
369 | "plot_images(disp_imgs, \"Restricted Boltzmann Machine\", test_disp)\n",
370 | "plt.show()"
371 | ]
372 | }
373 | ],
374 | "metadata": {
375 | "kernelspec": {
376 | "display_name": "Python 3",
377 | "language": "python",
378 | "name": "python3"
379 | },
380 | "language_info": {
381 | "codemirror_mode": {
382 | "name": "ipython",
383 | "version": 3
384 | },
385 | "file_extension": ".py",
386 | "mimetype": "text/x-python",
387 | "name": "python",
388 | "nbconvert_exporter": "python",
389 | "pygments_lexer": "ipython3",
390 | "version": "3.6.2"
391 | }
392 | },
393 | "nbformat": 4,
394 | "nbformat_minor": 2
395 | }
396 |
--------------------------------------------------------------------------------