├── .gitattributes
├── .gitignore
├── Lab01_Environment_Preparation
├── 01_InstallGit.pdf
├── 02_InstallAnaconda.pdf
├── 03_InstallTensorflow.pdf
└── tensorflow-1.0.0-cp35-cp35m-win_amd64.whl
├── Lab02_Basic_Tensorflow
├── .ipynb_checkpoints
│ ├── Lab02_Understand_Tensor-checkpoint.ipynb
│ └── Lab3_Understand_Tensor-checkpoint.ipynb
├── Lab02_Understand_Tensor.ipynb
└── images
│ ├── pic1.JPG
│ ├── pic2.bmp
│ ├── pic3.jpg
│ ├── pic4.JPG
│ ├── pic4.bmp
│ └── pic5.jpg
├── Lab03_Perceptron
├── .ipynb_checkpoints
│ ├── Lab03_Perceptron_and_Backprob-checkpoint.ipynb
│ └── Lab4_Perceptron_and_Backprob-checkpoint.ipynb
├── 538.json
├── Lab03_Perceptron_and_Backprob.ipynb
├── __pycache__
│ └── utils.cpython-35.pyc
├── images
│ ├── 1.JPG
│ ├── logic_table.gif
│ ├── lr.png
│ └── xor.png
└── utils.py
├── Lab04_Simple_Softmax
├── .ipynb_checkpoints
│ ├── 04_Simple_Softmax-checkpoint.ipynb
│ └── 05_Simple_Softmax-checkpoint.ipynb
├── 04_Simple_Softmax.ipynb
└── images
│ ├── 3.png
│ ├── optimizer1.gif
│ ├── optimizer2.gif
│ └── softmax_regression_en.png
├── Lab05_Multi_Layer_Percentron
├── .ipynb_checkpoints
│ ├── Lab05_Multilayer_Perceptron-checkpoint.ipynb
│ └── Lab6_Multilayer_Perceptron-checkpoint.ipynb
└── Lab05_Multilayer_Perceptron.ipynb
├── Lab06_Deep_Learning
├── .ipynb_checkpoints
│ └── Lab06_Deep_Learning-checkpoint.ipynb
└── Lab06_Deep_Learning.ipynb
├── Lab07_Deep_Learning_Ext
├── .ipynb_checkpoints
│ └── Lab07_Deep_Learning_Ext-checkpoint.ipynb
└── Lab07_Deep_Learning_Ext.ipynb
├── Lab08_Convolution_Neural_Network
├── .ipynb_checkpoints
│ └── LAB08_Convolution_Neural_Network-checkpoint.ipynb
├── LAB08_Convolution_Neural_Network.ipynb
├── TFLEARN.ipynb
└── images
│ ├── convo1.mp4
│ ├── convo2.mp4
│ ├── convo_in_01.mp4
│ ├── convo_in_02.mp4
│ ├── convo_in_03.mp4
│ ├── model.jpg
│ └── model.png
├── Lab09_Deep_Q_Learning
├── LAB09_Deep_Q_Learning.ipynb
├── LAB09_Deep_Q_Learning_fin.ipynb
├── assets
│ ├── audio
│ │ ├── die.ogg
│ │ ├── die.wav
│ │ ├── hit.ogg
│ │ ├── hit.wav
│ │ ├── point.ogg
│ │ ├── point.wav
│ │ ├── swoosh.ogg
│ │ ├── swoosh.wav
│ │ ├── wing.ogg
│ │ └── wing.wav
│ └── sprites
│ │ ├── 0.png
│ │ ├── 1.png
│ │ ├── 2.png
│ │ ├── 3.png
│ │ ├── 4.png
│ │ ├── 5.png
│ │ ├── 6.png
│ │ ├── 7.png
│ │ ├── 8.png
│ │ ├── 9.png
│ │ ├── background-black.png
│ │ ├── base.png
│ │ ├── pipe-green.png
│ │ ├── redbird-downflap.png
│ │ ├── redbird-midflap.png
│ │ └── redbird-upflap.png
├── deep_q_network.py
├── game
│ ├── __pycache__
│ │ ├── flappy_bird_utils.cpython-35.pyc
│ │ └── wrapped_flappy_bird.cpython-35.pyc
│ ├── flappy_bird_utils.py
│ └── wrapped_flappy_bird.py
├── logs_bird
│ ├── hidden.txt
│ └── readout.txt
└── saved_networks
│ ├── bird-dqn-2450000.data-00000-of-00001
│ ├── bird-dqn-2450000.index
│ ├── bird-dqn-2450000.meta
│ └── checkpoint
├── Lab10_Recurrent_NeuralNetwork
├── .ipynb_checkpoints
│ └── Tumbon-checkpoint.ipynb
├── LABXX_Basic_RNN.ipynb
├── LabXX_LSTMCell.ipynb
├── Tumbon.ipynb
├── US_Cities.txt
└── images
│ └── rnn.jpg
├── Lacture_01_Understand_Machine_Learning
└── 01_Machine_Learning.pdf
├── README.md
└── data
├── t10k-images-idx3-ubyte.gz
├── t10k-labels-idx1-ubyte.gz
├── train-images-idx3-ubyte.gz
└── train-labels-idx1-ubyte.gz
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
4 | # Custom for Visual Studio
5 | *.cs diff=csharp
6 |
7 | # Standard to msysgit
8 | *.doc diff=astextplain
9 | *.DOC diff=astextplain
10 | *.docx diff=astextplain
11 | *.DOCX diff=astextplain
12 | *.dot diff=astextplain
13 | *.DOT diff=astextplain
14 | *.pdf diff=astextplain
15 | *.PDF diff=astextplain
16 | *.rtf diff=astextplain
17 | *.RTF diff=astextplain
18 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Windows image file caches
2 | Thumbs.db
3 | ehthumbs.db
4 |
5 | # Folder config file
6 | Desktop.ini
7 |
8 | # Recycle Bin used on file shares
9 | $RECYCLE.BIN/
10 |
11 | # Windows Installer files
12 | *.cab
13 | *.msi
14 | *.msm
15 | *.msp
16 |
17 | # Windows shortcuts
18 | *.lnk
19 |
20 | # =========================
21 | # Operating System Files
22 | # =========================
23 |
24 | # OSX
25 | # =========================
26 |
27 | .DS_Store
28 | .AppleDouble
29 | .LSOverride
30 |
31 | # Thumbnails
32 | ._*
33 |
34 | # Files that might appear in the root of a volume
35 | .DocumentRevisions-V100
36 | .fseventsd
37 | .Spotlight-V100
38 | .TemporaryItems
39 | .Trashes
40 | .VolumeIcon.icns
41 |
42 | # Directories potentially created on remote AFP share
43 | .AppleDB
44 | .AppleDesktop
45 | Network Trash Folder
46 | Temporary Items
47 | .apdisk
48 |
--------------------------------------------------------------------------------
/Lab01_Environment_Preparation/01_InstallGit.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab01_Environment_Preparation/01_InstallGit.pdf
--------------------------------------------------------------------------------
/Lab01_Environment_Preparation/02_InstallAnaconda.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab01_Environment_Preparation/02_InstallAnaconda.pdf
--------------------------------------------------------------------------------
/Lab01_Environment_Preparation/03_InstallTensorflow.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab01_Environment_Preparation/03_InstallTensorflow.pdf
--------------------------------------------------------------------------------
/Lab01_Environment_Preparation/tensorflow-1.0.0-cp35-cp35m-win_amd64.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab01_Environment_Preparation/tensorflow-1.0.0-cp35-cp35m-win_amd64.whl
--------------------------------------------------------------------------------
/Lab02_Basic_Tensorflow/.ipynb_checkpoints/Lab02_Understand_Tensor-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "
#Lab 1 Understand Tensor and Flow by Comdet Phaudphut
"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {
14 | "collapsed": true
15 | },
16 | "outputs": [],
17 | "source": [
18 | "from __future__ import absolute_import\n",
19 | "from __future__ import division\n",
20 | "from __future__ import print_function\n",
21 | "import tensorflow as tf\n",
22 | "import numpy as np\n",
23 | "import os\n",
24 | "import time"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "Tensorflow constant
\n",
32 | "ใช้สำหรับประกาศ Tensor แบบเป็นค่าคงที่ เปลี่ยนแปลงค่าไม่ได้ในระหว่างรัน
\n",
33 | "ตัวอย่าง x มีค่า = 5
\n",
34 | "y มีค่า = 6
\n",
35 | "result ผลลัพท์เป็น tensor เหมือนกัน"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 2,
41 | "metadata": {
42 | "collapsed": false
43 | },
44 | "outputs": [
45 | {
46 | "name": "stdout",
47 | "output_type": "stream",
48 | "text": [
49 | "Tensor(\"mul:0\", shape=(), dtype=int32)\n",
50 | "Tensor(\"Const:0\", shape=(), dtype=int32)\n"
51 | ]
52 | }
53 | ],
54 | "source": [
55 | "x = tf.constant(5)\n",
56 | "y = tf.constant(6)\n",
57 | "result = x*y\n",
58 | "print(result)\n",
59 | "print(x)"
60 | ]
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "metadata": {},
65 | "source": [
66 | "Tensorflow variable
\n",
67 | "ใช้สำหรับประกาศตัวแปร เปลี่ยนแปลงค่าได้ระหว่างการรัน เช่นอาจทำการเก็บค่าจากการคำนวณต่างๆ
\n",
68 | "ในการสร้างนั้นอาจจะต้องมีการ Init ค่าให้ด้วย
\n",
69 | "ถ้ามีการใช้ตัวแปรจะต้องมี Initailize ให้ตัวแปรก่อนการใช้งาน
\n",
70 | "init_op = tf.global_variables_initializer()
\n",
71 | "tf.global_variables_initializer().run()
"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 3,
77 | "metadata": {
78 | "collapsed": false
79 | },
80 | "outputs": [
81 | {
82 | "name": "stdout",
83 | "output_type": "stream",
84 | "text": [
85 | "Tensor(\"weights/read:0\", shape=(2, 6), dtype=float32)\n",
86 | "Tensor(\"biases/read:0\", shape=(6,), dtype=float32)\n"
87 | ]
88 | }
89 | ],
90 | "source": [
91 | "#w = tf.Variable(, name=)\n",
92 | "weights = tf.Variable(tf.random_normal([2, 6]), name=\"weights\")\n",
93 | "biases = tf.Variable(tf.zeros([6]), name=\"biases\")\n",
94 | "print(weights)\n",
95 | "print(biases)"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {},
101 | "source": [
102 | "Tensorflow PlaceHolder
\n",
103 | "placeholder ใช้ในกรณีที่ต้องการ feed ค่าจากข้อมููลจากภายนอกไปเก็บไว้ใน tensor
\n",
104 | "มักร่วมกันกับ feed_dict เพื่อเชื่อมโยงข้อมููล"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": 4,
110 | "metadata": {
111 | "collapsed": false
112 | },
113 | "outputs": [
114 | {
115 | "name": "stdout",
116 | "output_type": "stream",
117 | "text": [
118 | "Tensor(\"Placeholder:0\", shape=(5, 5), dtype=float32)\n",
119 | "Tensor(\"Placeholder_1:0\", shape=(?, 5), dtype=float32)\n",
120 | "Tensor(\"MatMul:0\", shape=(5, 5), dtype=float32)\n"
121 | ]
122 | }
123 | ],
124 | "source": [
125 | "inp = tf.placeholder(tf.float32,[5,5])\n",
126 | "data = tf.placeholder(tf.float32,[None,5])\n",
127 | "outp = tf.matmul(inp,data)\n",
128 | "print(inp)\n",
129 | "print(data)\n",
130 | "print(outp)"
131 | ]
132 | },
133 | {
134 | "cell_type": "markdown",
135 | "metadata": {},
136 | "source": [
137 | " Everything running thorght session
\n",
138 | "หลักการทำงานของ Tensorflow นั้นจะไม่ทำงานทันทีที่รันโค้ด แต่จะมีการสร้างโครงกราฟคำนวณขึึ้นมา เรียกว่า Computation Graph
\n",
139 | "และการรันกราฟนั้นจะต้องทำผ่าน Session"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": 5,
145 | "metadata": {
146 | "collapsed": false
147 | },
148 | "outputs": [
149 | {
150 | "name": "stdout",
151 | "output_type": "stream",
152 | "text": [
153 | "30\n"
154 | ]
155 | }
156 | ],
157 | "source": [
158 | "sess = tf.Session()\n",
159 | "res = sess.run(result)\n",
160 | "print(res)\n",
161 | "sess.close()"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": 6,
167 | "metadata": {
168 | "collapsed": false
169 | },
170 | "outputs": [
171 | {
172 | "name": "stdout",
173 | "output_type": "stream",
174 | "text": [
175 | "30\n"
176 | ]
177 | }
178 | ],
179 | "source": [
180 | "with tf.Session() as sess:\n",
181 | " output = sess.run(x*y)\n",
182 | " print(output)"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": 7,
188 | "metadata": {
189 | "collapsed": false
190 | },
191 | "outputs": [
192 | {
193 | "name": "stdout",
194 | "output_type": "stream",
195 | "text": [
196 | "#############\n",
197 | "30\n",
198 | "#############\n",
199 | "[[-1.06843245 0.70515025 -0.99292415 0.13744356 0.20356697 0.70010537]\n",
200 | " [-1.14673483 1.0408721 -2.02005363 1.05647695 -0.2939561 -1.72112107]]\n",
201 | "#############\n",
202 | "[[ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
203 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
204 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
205 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
206 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]]\n"
207 | ]
208 | }
209 | ],
210 | "source": [
211 | "sess = tf.InteractiveSession()\n",
212 | "tf.global_variables_initializer().run()\n",
213 | "print(\"#############\")\n",
214 | "print(result.eval())\n",
215 | "print(\"#############\")\n",
216 | "print(weights.eval())\n",
217 | "print(\"#############\")\n",
218 | "########### feed dict use for feed data to placeholder ############\n",
219 | "data_inp = np.ones([5,5])\n",
220 | "data_res = np.random.random([5,5])\n",
221 | "print(outp.eval(feed_dict={inp: data_inp, data: data_res}))"
222 | ]
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "metadata": {},
227 | "source": [
228 | "load and save variable
\n",
229 | "ในการทำงานอาจจะต้องมีการ save/load ค่าทีได้จากการคำนวณไว้ใช้งาน หรือป้องกันการเกิดกรณีที่ไม่คาดคิดเช่นไฟดับขณะรันโปรแกรม
\n",
230 | "ทุกครั้งที่ทำการสร้าง sess และ Init variable ขึ้นมาใหม่ค่าเดิมจะถููกล้างทั้งหมด"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "metadata": {
237 | "collapsed": false
238 | },
239 | "outputs": [],
240 | "source": [
241 | "print(\"===========\")\n",
242 | "print(weights.eval())\n",
243 | "print(\"===========\")\n",
244 | "if not os.path.isdir('checkpoint'):\n",
245 | " os.mkdir('checkpoint')\n",
246 | " \n",
247 | "######## ทำการสร้างตัว Save/Load ########\n",
248 | "saver = tf.train.Saver()\n",
249 | "#saving model \n",
250 | "saver.save(sess, 'checkpoint/model', global_step=1)\n",
251 | "sess.close()\n",
252 | "####### re-init variable\n",
253 | "sess = tf.InteractiveSession()\n",
254 | "tf.global_variables_initializer().run()\n",
255 | "print(\"===========\")\n",
256 | "print(weights.eval())\n",
257 | "print(\"===========\")\n",
258 | "########## loading model\n",
259 | "checkpoint = tf.train.get_checkpoint_state('checkpoint/')\n",
260 | "if checkpoint and checkpoint.model_checkpoint_path:\n",
261 | " saver.restore(sess, checkpoint.model_checkpoint_path)\n",
262 | " print ('Successfully loaded:', checkpoint.model_checkpoint_path)\n",
263 | "print(\"===========\")\n",
264 | "print(weights.eval())\n",
265 | "print(\"===========\")"
266 | ]
267 | },
268 | {
269 | "cell_type": "markdown",
270 | "metadata": {
271 | "collapsed": true
272 | },
273 | "source": [
274 | " Writing Graph Log
\n",
275 | "เราสามารถใช้ตัว summary เป็นตัวเก็บสถานะของการคำนวณเพื่อนำไปแสดงผลให้อยู่ในรูปแบบที่เข้าใจได้ง่าย
"
276 | ]
277 | },
278 | {
279 | "cell_type": "code",
280 | "execution_count": 8,
281 | "metadata": {
282 | "collapsed": false
283 | },
284 | "outputs": [
285 | {
286 | "name": "stdout",
287 | "output_type": "stream",
288 | "text": [
289 | "63\n"
290 | ]
291 | }
292 | ],
293 | "source": [
294 | "#ทำการ restart kernel ก่อน เดี๋ยวจะตีกันกับกราฟด้านบน\n",
295 | "import tensorflow as tf\n",
296 | "import os\n",
297 | "\n",
298 | "if not os.path.isdir('logs'):\n",
299 | " os.mkdir('logs')\n",
300 | "a = tf.add(1, 2)\n",
301 | "b = tf.multiply(a, 3)\n",
302 | "c = tf.add(4, 5,)\n",
303 | "d = tf.multiply(c, 6,)\n",
304 | "e = tf.multiply(4, 5,)\n",
305 | "f = tf.div(c, 6,)\n",
306 | "g = tf.add(b, d)\n",
307 | "h = tf.multiply(g, f)\n",
308 | "init = tf.global_variables_initializer()\n",
309 | "with tf.Session() as sess:\n",
310 | " sess.run(init)\n",
311 | " writer = tf.summary.FileWriter(\"logs\", sess.graph) #graph=tf.get_default_graph() \n",
312 | " print(sess.run(h))\n",
313 | " writer.close()"
314 | ]
315 | },
316 | {
317 | "cell_type": "markdown",
318 | "metadata": {},
319 | "source": [
320 | "การใช้งาน Tensorboard
\n",
321 | "
\n",
322 | "
\n",
323 | "
\n",
324 | "
\n",
325 | "
\n",
326 | "
\n",
327 | "
\n",
328 | "
"
329 | ]
330 | },
331 | {
332 | "cell_type": "code",
333 | "execution_count": null,
334 | "metadata": {
335 | "collapsed": false
336 | },
337 | "outputs": [],
338 | "source": [
339 | "#ทำการ restart kernel ก่อน เดี๋ยวจะตีกันกับกราฟด้านบน\n",
340 | "import tensorflow as tf\n",
341 | "import os\n",
342 | "if not os.path.isdir('logs_scope'):\n",
343 | " os.mkdir('logs_scope')\n",
344 | "with tf.name_scope(\"OperationGroup_1\"):\n",
345 | " with tf.name_scope(\"Scope1\"):\n",
346 | " a = tf.add(1, 2, name=\"A_add\")\n",
347 | " b = tf.multiply(a, 3, name=\"B_multiply\")\n",
348 | " with tf.name_scope(\"Scope2\"):\n",
349 | " c = tf.add(4, 5, name=\"C_add\")\n",
350 | " d = tf.multiply(c, 6, name=\"D_multiply\")\n",
351 | "\n",
352 | "with tf.name_scope(\"Scope_3\"):\n",
353 | " e = tf.multiply(4, 5, name=\"E_add\")\n",
354 | " f = tf.div(c, 6, name=\"f_div\")\n",
355 | "g = tf.add(b, d,name=\"g_add\")\n",
356 | "h = tf.multiply(g, f)\n",
357 | "init = tf.global_variables_initializer()\n",
358 | "with tf.Session() as sess:\n",
359 | " sess.run(init)\n",
360 | " writer = tf.summary.FileWriter(\"logs_scope\", sess.graph) #graph=tf.get_default_graph() \n",
361 | " print(sess.run(h))\n",
362 | " writer.close()"
363 | ]
364 | },
365 | {
366 | "cell_type": "markdown",
367 | "metadata": {},
368 | "source": [
369 | "
\n",
370 | "
"
371 | ]
372 | },
373 | {
374 | "cell_type": "code",
375 | "execution_count": null,
376 | "metadata": {
377 | "collapsed": true
378 | },
379 | "outputs": [],
380 | "source": []
381 | }
382 | ],
383 | "metadata": {
384 | "kernelspec": {
385 | "display_name": "Tensorflow 3",
386 | "language": "python",
387 | "name": "tensorflow"
388 | },
389 | "language_info": {
390 | "codemirror_mode": {
391 | "name": "ipython",
392 | "version": 3
393 | },
394 | "file_extension": ".py",
395 | "mimetype": "text/x-python",
396 | "name": "python",
397 | "nbconvert_exporter": "python",
398 | "pygments_lexer": "ipython3",
399 | "version": "3.5.3"
400 | }
401 | },
402 | "nbformat": 4,
403 | "nbformat_minor": 2
404 | }
405 |
--------------------------------------------------------------------------------
/Lab02_Basic_Tensorflow/.ipynb_checkpoints/Lab3_Understand_Tensor-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "#Lab 1 Understand Tensor and Flow by Comdet Phaudphut
"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {
14 | "collapsed": true
15 | },
16 | "outputs": [],
17 | "source": [
18 | "from __future__ import absolute_import\n",
19 | "from __future__ import division\n",
20 | "from __future__ import print_function\n",
21 | "import tensorflow as tf\n",
22 | "import numpy as np\n",
23 | "import os\n",
24 | "import time"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "Tensorflow constant
\n",
32 | "ใช้สำหรับประกาศ Tensor แบบเป็นค่าคงที่ เปลี่ยนแปลงค่าไม่ได้ในระหว่างรัน
\n",
33 | "ตัวอย่าง x มีค่า = 5
\n",
34 | "y มีค่า = 6
\n",
35 | "result ผลลัพท์เป็น tensor เหมือนกัน"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 2,
41 | "metadata": {
42 | "collapsed": false
43 | },
44 | "outputs": [
45 | {
46 | "name": "stdout",
47 | "output_type": "stream",
48 | "text": [
49 | "Tensor(\"mul:0\", shape=(), dtype=int32)\n",
50 | "Tensor(\"Const:0\", shape=(), dtype=int32)\n"
51 | ]
52 | }
53 | ],
54 | "source": [
55 | "x = tf.constant(5)\n",
56 | "y = tf.constant(6)\n",
57 | "result = x*y\n",
58 | "print(result)\n",
59 | "print(x)"
60 | ]
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "metadata": {},
65 | "source": [
66 | "Tensorflow variable
\n",
67 | "ใช้สำหรับประกาศตัวแปร เปลี่ยนแปลงค่าได้ระหว่างการรัน เช่นอาจทำการเก็บค่าจากการคำนวณต่างๆ
\n",
68 | "ในการสร้างนั้นอาจจะต้องมีการ Init ค่าให้ด้วย
\n",
69 | "ถ้ามีการใช้ตัวแปรจะต้องมี Initailize ให้ตัวแปรก่อนการใช้งาน
\n",
70 | "init_op = tf.global_variables_initializer()
\n",
71 | "tf.global_variables_initializer().run()
"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 3,
77 | "metadata": {
78 | "collapsed": false
79 | },
80 | "outputs": [
81 | {
82 | "name": "stdout",
83 | "output_type": "stream",
84 | "text": [
85 | "Tensor(\"weights/read:0\", shape=(2, 6), dtype=float32)\n",
86 | "Tensor(\"biases/read:0\", shape=(6,), dtype=float32)\n"
87 | ]
88 | }
89 | ],
90 | "source": [
91 | "#w = tf.Variable(, name=)\n",
92 | "weights = tf.Variable(tf.random_normal([2, 6]), name=\"weights\")\n",
93 | "biases = tf.Variable(tf.zeros([6]), name=\"biases\")\n",
94 | "print(weights)\n",
95 | "print(biases)"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {},
101 | "source": [
102 | "Tensorflow PlaceHolder
\n",
103 | "placeholder ใช้ในกรณีที่ต้องการ feed ค่าจากข้อมููลจากภายนอกไปเก็บไว้ใน tensor
\n",
104 | "มักร่วมกันกับ feed_dict เพื่อเชื่อมโยงข้อมููล"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": 4,
110 | "metadata": {
111 | "collapsed": false
112 | },
113 | "outputs": [
114 | {
115 | "name": "stdout",
116 | "output_type": "stream",
117 | "text": [
118 | "Tensor(\"Placeholder:0\", shape=(5, 5), dtype=float32)\n",
119 | "Tensor(\"Placeholder_1:0\", shape=(?, 5), dtype=float32)\n",
120 | "Tensor(\"MatMul:0\", shape=(5, 5), dtype=float32)\n"
121 | ]
122 | }
123 | ],
124 | "source": [
125 | "inp = tf.placeholder(tf.float32,[5,5])\n",
126 | "data = tf.placeholder(tf.float32,[None,5])\n",
127 | "outp = tf.matmul(inp,data)\n",
128 | "print(inp)\n",
129 | "print(data)\n",
130 | "print(outp)"
131 | ]
132 | },
133 | {
134 | "cell_type": "markdown",
135 | "metadata": {},
136 | "source": [
137 | " Everything running thorght session
\n",
138 | "หลักการทำงานของ Tensorflow นั้นจะไม่ทำงานทันทีที่รันโค้ด แต่จะมีการสร้างโครงกราฟคำนวณขึึ้นมา เรียกว่า Computation Graph
\n",
139 | "และการรันกราฟนั้นจะต้องทำผ่าน Session"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": 5,
145 | "metadata": {
146 | "collapsed": false
147 | },
148 | "outputs": [
149 | {
150 | "name": "stdout",
151 | "output_type": "stream",
152 | "text": [
153 | "30\n"
154 | ]
155 | }
156 | ],
157 | "source": [
158 | "sess = tf.Session()\n",
159 | "res = sess.run(result)\n",
160 | "print(res)\n",
161 | "sess.close()"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": 6,
167 | "metadata": {
168 | "collapsed": false
169 | },
170 | "outputs": [
171 | {
172 | "name": "stdout",
173 | "output_type": "stream",
174 | "text": [
175 | "30\n"
176 | ]
177 | }
178 | ],
179 | "source": [
180 | "with tf.Session() as sess:\n",
181 | " output = sess.run(x*y)\n",
182 | " print(output)"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": 7,
188 | "metadata": {
189 | "collapsed": false
190 | },
191 | "outputs": [
192 | {
193 | "name": "stdout",
194 | "output_type": "stream",
195 | "text": [
196 | "#############\n",
197 | "30\n",
198 | "#############\n",
199 | "[[-1.06843245 0.70515025 -0.99292415 0.13744356 0.20356697 0.70010537]\n",
200 | " [-1.14673483 1.0408721 -2.02005363 1.05647695 -0.2939561 -1.72112107]]\n",
201 | "#############\n",
202 | "[[ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
203 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
204 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
205 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
206 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]]\n"
207 | ]
208 | }
209 | ],
210 | "source": [
211 | "sess = tf.InteractiveSession()\n",
212 | "tf.global_variables_initializer().run()\n",
213 | "print(\"#############\")\n",
214 | "print(result.eval())\n",
215 | "print(\"#############\")\n",
216 | "print(weights.eval())\n",
217 | "print(\"#############\")\n",
218 | "########### feed dict use for feed data to placeholder ############\n",
219 | "data_inp = np.ones([5,5])\n",
220 | "data_res = np.random.random([5,5])\n",
221 | "print(outp.eval(feed_dict={inp: data_inp, data: data_res}))"
222 | ]
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "metadata": {},
227 | "source": [
228 | "load and save variable
\n",
229 | "ในการทำงานอาจจะต้องมีการ save/load ค่าทีได้จากการคำนวณไว้ใช้งาน หรือป้องกันการเกิดกรณีที่ไม่คาดคิดเช่นไฟดับขณะรันโปรแกรม
\n",
230 | "ทุกครั้งที่ทำการสร้าง sess และ Init variable ขึ้นมาใหม่ค่าเดิมจะถููกล้างทั้งหมด"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "metadata": {
237 | "collapsed": false
238 | },
239 | "outputs": [],
240 | "source": [
241 | "print(\"===========\")\n",
242 | "print(weights.eval())\n",
243 | "print(\"===========\")\n",
244 | "if not os.path.isdir('checkpoint'):\n",
245 | " os.mkdir('checkpoint')\n",
246 | " \n",
247 | "######## ทำการสร้างตัว Save/Load ########\n",
248 | "saver = tf.train.Saver()\n",
249 | "#saving model \n",
250 | "saver.save(sess, 'checkpoint/model', global_step=1)\n",
251 | "sess.close()\n",
252 | "####### re-init variable\n",
253 | "sess = tf.InteractiveSession()\n",
254 | "tf.global_variables_initializer().run()\n",
255 | "print(\"===========\")\n",
256 | "print(weights.eval())\n",
257 | "print(\"===========\")\n",
258 | "########## loading model\n",
259 | "checkpoint = tf.train.get_checkpoint_state('checkpoint/')\n",
260 | "if checkpoint and checkpoint.model_checkpoint_path:\n",
261 | " saver.restore(sess, checkpoint.model_checkpoint_path)\n",
262 | " print ('Successfully loaded:', checkpoint.model_checkpoint_path)\n",
263 | "print(\"===========\")\n",
264 | "print(weights.eval())\n",
265 | "print(\"===========\")"
266 | ]
267 | },
268 | {
269 | "cell_type": "markdown",
270 | "metadata": {
271 | "collapsed": true
272 | },
273 | "source": [
274 | " Writing Graph Log
\n",
275 | "เราสามารถใช้ตัว summary เป็นตัวเก็บสถานะของการคำนวณเพื่อนำไปแสดงผลให้อยู่ในรูปแบบที่เข้าใจได้ง่าย
"
276 | ]
277 | },
278 | {
279 | "cell_type": "code",
280 | "execution_count": 8,
281 | "metadata": {
282 | "collapsed": false
283 | },
284 | "outputs": [
285 | {
286 | "name": "stdout",
287 | "output_type": "stream",
288 | "text": [
289 | "63\n"
290 | ]
291 | }
292 | ],
293 | "source": [
294 | "#ทำการ restart kernel ก่อน เดี๋ยวจะตีกันกับกราฟด้านบน\n",
295 | "import tensorflow as tf\n",
296 | "import os\n",
297 | "\n",
298 | "if not os.path.isdir('logs'):\n",
299 | " os.mkdir('logs')\n",
300 | "a = tf.add(1, 2)\n",
301 | "b = tf.multiply(a, 3)\n",
302 | "c = tf.add(4, 5,)\n",
303 | "d = tf.multiply(c, 6,)\n",
304 | "e = tf.multiply(4, 5,)\n",
305 | "f = tf.div(c, 6,)\n",
306 | "g = tf.add(b, d)\n",
307 | "h = tf.multiply(g, f)\n",
308 | "init = tf.global_variables_initializer()\n",
309 | "with tf.Session() as sess:\n",
310 | " sess.run(init)\n",
311 | " writer = tf.summary.FileWriter(\"logs\", sess.graph) #graph=tf.get_default_graph() \n",
312 | " print(sess.run(h))\n",
313 | " writer.close()"
314 | ]
315 | },
316 | {
317 | "cell_type": "markdown",
318 | "metadata": {},
319 | "source": [
320 | "การใช้งาน Tensorboard
\n",
321 | "
\n",
322 | "
\n",
323 | "
\n",
324 | "
\n",
325 | "
\n",
326 | "
"
327 | ]
328 | },
329 | {
330 | "cell_type": "code",
331 | "execution_count": null,
332 | "metadata": {
333 | "collapsed": false
334 | },
335 | "outputs": [],
336 | "source": [
337 | "#ทำการ restart kernel ก่อน เดี๋ยวจะตีกันกับกราฟด้านบน\n",
338 | "import tensorflow as tf\n",
339 | "import os\n",
340 | "if not os.path.isdir('logs_scope'):\n",
341 | " os.mkdir('logs_scope')\n",
342 | "with tf.name_scope(\"OperationGroup_1\"):\n",
343 | " with tf.name_scope(\"Scope1\"):\n",
344 | " a = tf.add(1, 2, name=\"A_add\")\n",
345 | " b = tf.multiply(a, 3, name=\"B_multiply\")\n",
346 | " with tf.name_scope(\"Scope2\"):\n",
347 | " c = tf.add(4, 5, name=\"C_add\")\n",
348 | " d = tf.multiply(c, 6, name=\"D_multiply\")\n",
349 | "\n",
350 | "with tf.name_scope(\"Scope_3\"):\n",
351 | " e = tf.multiply(4, 5, name=\"E_add\")\n",
352 | " f = tf.div(c, 6, name=\"f_div\")\n",
353 | "g = tf.add(b, d,name=\"g_add\")\n",
354 | "h = tf.multiply(g, f)\n",
355 | "init = tf.global_variables_initializer()\n",
356 | "with tf.Session() as sess:\n",
357 | " sess.run(init)\n",
358 | " writer = tf.summary.FileWriter(\"logs_scope\", sess.graph) #graph=tf.get_default_graph() \n",
359 | " print(sess.run(h))\n",
360 | " writer.close()"
361 | ]
362 | },
363 | {
364 | "cell_type": "markdown",
365 | "metadata": {},
366 | "source": [
367 | "
\n",
368 | "
"
369 | ]
370 | },
371 | {
372 | "cell_type": "code",
373 | "execution_count": null,
374 | "metadata": {
375 | "collapsed": true
376 | },
377 | "outputs": [],
378 | "source": []
379 | }
380 | ],
381 | "metadata": {
382 | "kernelspec": {
383 | "display_name": "Tensorflow 3",
384 | "language": "python",
385 | "name": "tensorflow"
386 | },
387 | "language_info": {
388 | "codemirror_mode": {
389 | "name": "ipython",
390 | "version": 3
391 | },
392 | "file_extension": ".py",
393 | "mimetype": "text/x-python",
394 | "name": "python",
395 | "nbconvert_exporter": "python",
396 | "pygments_lexer": "ipython3",
397 | "version": "3.5.3"
398 | }
399 | },
400 | "nbformat": 4,
401 | "nbformat_minor": 2
402 | }
403 |
--------------------------------------------------------------------------------
/Lab02_Basic_Tensorflow/Lab02_Understand_Tensor.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "#Lab 1 Understand Tensor and Flow by Comdet Phaudphut
"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {
14 | "collapsed": true
15 | },
16 | "outputs": [],
17 | "source": [
18 | "from __future__ import absolute_import\n",
19 | "from __future__ import division\n",
20 | "from __future__ import print_function\n",
21 | "import tensorflow as tf\n",
22 | "import numpy as np\n",
23 | "import os\n",
24 | "import time"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "Tensorflow constant
\n",
32 | "ใช้สำหรับประกาศ Tensor แบบเป็นค่าคงที่ เปลี่ยนแปลงค่าไม่ได้ในระหว่างรัน
\n",
33 | "ตัวอย่าง x มีค่า = 5
\n",
34 | "y มีค่า = 6
\n",
35 | "result ผลลัพท์เป็น tensor เหมือนกัน"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 2,
41 | "metadata": {
42 | "collapsed": false
43 | },
44 | "outputs": [
45 | {
46 | "name": "stdout",
47 | "output_type": "stream",
48 | "text": [
49 | "Tensor(\"mul:0\", shape=(), dtype=int32)\n",
50 | "Tensor(\"Const:0\", shape=(), dtype=int32)\n"
51 | ]
52 | }
53 | ],
54 | "source": [
55 | "x = tf.constant(5)\n",
56 | "y = tf.constant(6)\n",
57 | "result = x*y\n",
58 | "print(result)\n",
59 | "print(x)"
60 | ]
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "metadata": {},
65 | "source": [
66 | "Tensorflow variable
\n",
67 | "ใช้สำหรับประกาศตัวแปร เปลี่ยนแปลงค่าได้ระหว่างการรัน เช่นอาจทำการเก็บค่าจากการคำนวณต่างๆ
\n",
68 | "ในการสร้างนั้นอาจจะต้องมีการ Init ค่าให้ด้วย
\n",
69 | "ถ้ามีการใช้ตัวแปรจะต้องมี Initailize ให้ตัวแปรก่อนการใช้งาน
\n",
70 | "init_op = tf.global_variables_initializer()
\n",
71 | "tf.global_variables_initializer().run()
"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 3,
77 | "metadata": {
78 | "collapsed": false
79 | },
80 | "outputs": [
81 | {
82 | "name": "stdout",
83 | "output_type": "stream",
84 | "text": [
85 | "Tensor(\"weights/read:0\", shape=(2, 6), dtype=float32)\n",
86 | "Tensor(\"biases/read:0\", shape=(6,), dtype=float32)\n"
87 | ]
88 | }
89 | ],
90 | "source": [
91 | "#w = tf.Variable(, name=)\n",
92 | "weights = tf.Variable(tf.random_normal([2, 6]), name=\"weights\")\n",
93 | "biases = tf.Variable(tf.zeros([6]), name=\"biases\")\n",
94 | "print(weights)\n",
95 | "print(biases)"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {},
101 | "source": [
102 | "Tensorflow PlaceHolder
\n",
103 | "placeholder ใช้ในกรณีที่ต้องการ feed ค่าจากข้อมููลจากภายนอกไปเก็บไว้ใน tensor
\n",
104 | "มักร่วมกันกับ feed_dict เพื่อเชื่อมโยงข้อมููล"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": 4,
110 | "metadata": {
111 | "collapsed": false
112 | },
113 | "outputs": [
114 | {
115 | "name": "stdout",
116 | "output_type": "stream",
117 | "text": [
118 | "Tensor(\"Placeholder:0\", shape=(5, 5), dtype=float32)\n",
119 | "Tensor(\"Placeholder_1:0\", shape=(?, 5), dtype=float32)\n",
120 | "Tensor(\"MatMul:0\", shape=(5, 5), dtype=float32)\n"
121 | ]
122 | }
123 | ],
124 | "source": [
125 | "inp = tf.placeholder(tf.float32,[5,5])\n",
126 | "data = tf.placeholder(tf.float32,[None,5])\n",
127 | "outp = tf.matmul(inp,data)\n",
128 | "print(inp)\n",
129 | "print(data)\n",
130 | "print(outp)"
131 | ]
132 | },
133 | {
134 | "cell_type": "markdown",
135 | "metadata": {},
136 | "source": [
137 | "Everything runs through a session\n",
138 | "หลักการทำงานของ Tensorflow นั้นจะไม่ทำงานทันทีที่รันโค้ด แต่จะมีการสร้างโครงกราฟคำนวณขึ้นมา เรียกว่า Computation Graph\n",
139 | "และการรันกราฟนั้นจะต้องทำผ่าน Session"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": 5,
145 | "metadata": {
146 | "collapsed": false
147 | },
148 | "outputs": [
149 | {
150 | "name": "stdout",
151 | "output_type": "stream",
152 | "text": [
153 | "30\n"
154 | ]
155 | }
156 | ],
157 | "source": [
158 | "sess = tf.Session()\n",
159 | "res = sess.run(result)\n",
160 | "print(res)\n",
161 | "sess.close()"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": 6,
167 | "metadata": {
168 | "collapsed": false
169 | },
170 | "outputs": [
171 | {
172 | "name": "stdout",
173 | "output_type": "stream",
174 | "text": [
175 | "30\n"
176 | ]
177 | }
178 | ],
179 | "source": [
180 | "with tf.Session() as sess:\n",
181 | " output = sess.run(x*y)\n",
182 | " print(output)"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": 7,
188 | "metadata": {
189 | "collapsed": false
190 | },
191 | "outputs": [
192 | {
193 | "name": "stdout",
194 | "output_type": "stream",
195 | "text": [
196 | "#############\n",
197 | "30\n",
198 | "#############\n",
199 | "[[-1.06843245 0.70515025 -0.99292415 0.13744356 0.20356697 0.70010537]\n",
200 | " [-1.14673483 1.0408721 -2.02005363 1.05647695 -0.2939561 -1.72112107]]\n",
201 | "#############\n",
202 | "[[ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
203 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
204 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
205 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]\n",
206 | " [ 3.53231573 1.71381831 2.75894833 2.79766798 1.06851387]]\n"
207 | ]
208 | }
209 | ],
210 | "source": [
211 | "sess = tf.InteractiveSession()\n",
212 | "tf.global_variables_initializer().run()\n",
213 | "print(\"#############\")\n",
214 | "print(result.eval())\n",
215 | "print(\"#############\")\n",
216 | "print(weights.eval())\n",
217 | "print(\"#############\")\n",
218 | "########### feed dict use for feed data to placeholder ############\n",
219 | "data_inp = np.ones([5,5])\n",
220 | "data_res = np.random.random([5,5])\n",
221 | "print(outp.eval(feed_dict={inp: data_inp, data: data_res}))"
222 | ]
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "metadata": {},
227 | "source": [
228 | "Load and save variables\n",
229 | "ในการทำงานอาจจะต้องมีการ save/load ค่าที่ได้จากการคำนวณไว้ใช้งาน หรือป้องกันการเกิดกรณีที่ไม่คาดคิดเช่นไฟดับขณะรันโปรแกรม\n",
230 | "ทุกครั้งที่ทำการสร้าง sess และ Init variable ขึ้นมาใหม่ค่าเดิมจะถูกล้างทั้งหมด"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "metadata": {
237 | "collapsed": false
238 | },
239 | "outputs": [],
240 | "source": [
241 | "print(\"===========\")\n",
242 | "print(weights.eval())\n",
243 | "print(\"===========\")\n",
244 | "if not os.path.isdir('checkpoint'):\n",
245 | " os.mkdir('checkpoint')\n",
246 | " \n",
247 | "######## ทำการสร้างตัว Save/Load ########\n",
248 | "saver = tf.train.Saver()\n",
249 | "#saving model \n",
250 | "saver.save(sess, 'checkpoint/model', global_step=1)\n",
251 | "sess.close()\n",
252 | "####### re-init variable\n",
253 | "sess = tf.InteractiveSession()\n",
254 | "tf.global_variables_initializer().run()\n",
255 | "print(\"===========\")\n",
256 | "print(weights.eval())\n",
257 | "print(\"===========\")\n",
258 | "########## loading model\n",
259 | "checkpoint = tf.train.get_checkpoint_state('checkpoint/')\n",
260 | "if checkpoint and checkpoint.model_checkpoint_path:\n",
261 | " saver.restore(sess, checkpoint.model_checkpoint_path)\n",
262 | " print ('Successfully loaded:', checkpoint.model_checkpoint_path)\n",
263 | "print(\"===========\")\n",
264 | "print(weights.eval())\n",
265 | "print(\"===========\")"
266 | ]
267 | },
268 | {
269 | "cell_type": "markdown",
270 | "metadata": {
271 | "collapsed": true
272 | },
273 | "source": [
274 | "Writing Graph Log\n",
275 | "เราสามารถใช้ตัว summary เป็นตัวเก็บสถานะของการคำนวณเพื่อนำไปแสดงผลให้อยู่ในรูปแบบที่เข้าใจได้ง่าย"
276 | ]
277 | },
278 | {
279 | "cell_type": "code",
280 | "execution_count": 8,
281 | "metadata": {
282 | "collapsed": false
283 | },
284 | "outputs": [
285 | {
286 | "name": "stdout",
287 | "output_type": "stream",
288 | "text": [
289 | "63\n"
290 | ]
291 | }
292 | ],
293 | "source": [
294 | "#ทำการ restart kernel ก่อน เดี๋ยวจะตีกันกับกราฟด้านบน\n",
295 | "import tensorflow as tf\n",
296 | "import os\n",
297 | "\n",
298 | "if not os.path.isdir('logs'):\n",
299 | " os.mkdir('logs')\n",
300 | "a = tf.add(1, 2)\n",
301 | "b = tf.multiply(a, 3)\n",
302 | "c = tf.add(4, 5,)\n",
303 | "d = tf.multiply(c, 6,)\n",
304 | "e = tf.multiply(4, 5,)\n",
305 | "f = tf.div(c, 6,)\n",
306 | "g = tf.add(b, d)\n",
307 | "h = tf.multiply(g, f)\n",
308 | "init = tf.global_variables_initializer()\n",
309 | "with tf.Session() as sess:\n",
310 | " sess.run(init)\n",
311 | " writer = tf.summary.FileWriter(\"logs\", sess.graph) #graph=tf.get_default_graph() \n",
312 | " print(sess.run(h))\n",
313 | " writer.close()"
314 | ]
315 | },
316 | {
317 | "cell_type": "markdown",
318 | "metadata": {},
319 | "source": [
320 | "การใช้งาน Tensorboard\n",
321 | "\n",
322 | "\n",
323 | "\n",
324 | "\n",
325 | "\n",
326 | "\n",
327 | "\n",
328 | ""
329 | ]
330 | },
331 | {
332 | "cell_type": "code",
333 | "execution_count": null,
334 | "metadata": {
335 | "collapsed": false
336 | },
337 | "outputs": [],
338 | "source": [
339 | "#ทำการ restart kernel ก่อน เดี๋ยวจะตีกันกับกราฟด้านบน\n",
340 | "import tensorflow as tf\n",
341 | "import os\n",
342 | "if not os.path.isdir('logs_scope'):\n",
343 | " os.mkdir('logs_scope')\n",
344 | "with tf.name_scope(\"OperationGroup_1\"):\n",
345 | " with tf.name_scope(\"Scope1\"):\n",
346 | " a = tf.add(1, 2, name=\"A_add\")\n",
347 | " b = tf.multiply(a, 3, name=\"B_multiply\")\n",
348 | " with tf.name_scope(\"Scope2\"):\n",
349 | " c = tf.add(4, 5, name=\"C_add\")\n",
350 | " d = tf.multiply(c, 6, name=\"D_multiply\")\n",
351 | "\n",
352 | "with tf.name_scope(\"Scope_3\"):\n",
353 | " e = tf.multiply(4, 5, name=\"E_add\")\n",
354 | " f = tf.div(c, 6, name=\"f_div\")\n",
355 | "g = tf.add(b, d,name=\"g_add\")\n",
356 | "h = tf.multiply(g, f)\n",
357 | "init = tf.global_variables_initializer()\n",
358 | "with tf.Session() as sess:\n",
359 | " sess.run(init)\n",
360 | " writer = tf.summary.FileWriter(\"logs_scope\", sess.graph) #graph=tf.get_default_graph() \n",
361 | " print(sess.run(h))\n",
362 | " writer.close()"
363 | ]
364 | },
365 | {
366 | "cell_type": "markdown",
367 | "metadata": {},
368 | "source": [
369 | "\n",
370 | ""
371 | ]
372 | },
373 | {
374 | "cell_type": "code",
375 | "execution_count": null,
376 | "metadata": {
377 | "collapsed": true
378 | },
379 | "outputs": [],
380 | "source": []
381 | }
382 | ],
383 | "metadata": {
384 | "kernelspec": {
385 | "display_name": "Tensorflow 3",
386 | "language": "python",
387 | "name": "tensorflow"
388 | },
389 | "language_info": {
390 | "codemirror_mode": {
391 | "name": "ipython",
392 | "version": 3
393 | },
394 | "file_extension": ".py",
395 | "mimetype": "text/x-python",
396 | "name": "python",
397 | "nbconvert_exporter": "python",
398 | "pygments_lexer": "ipython3",
399 | "version": "3.5.3"
400 | }
401 | },
402 | "nbformat": 4,
403 | "nbformat_minor": 2
404 | }
405 |
--------------------------------------------------------------------------------
/Lab02_Basic_Tensorflow/images/pic1.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab02_Basic_Tensorflow/images/pic1.JPG
--------------------------------------------------------------------------------
/Lab02_Basic_Tensorflow/images/pic2.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab02_Basic_Tensorflow/images/pic2.bmp
--------------------------------------------------------------------------------
/Lab02_Basic_Tensorflow/images/pic3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab02_Basic_Tensorflow/images/pic3.jpg
--------------------------------------------------------------------------------
/Lab02_Basic_Tensorflow/images/pic4.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab02_Basic_Tensorflow/images/pic4.JPG
--------------------------------------------------------------------------------
/Lab02_Basic_Tensorflow/images/pic4.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab02_Basic_Tensorflow/images/pic4.bmp
--------------------------------------------------------------------------------
/Lab02_Basic_Tensorflow/images/pic5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab02_Basic_Tensorflow/images/pic5.jpg
--------------------------------------------------------------------------------
/Lab03_Perceptron/.ipynb_checkpoints/Lab03_Perceptron_and_Backprob-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "Basic Knowledge
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "1. Perceptron\n",
15 | "
"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {
22 | "collapsed": false
23 | },
24 | "outputs": [],
25 | "source": [
26 | "from random import choice\n",
27 | "from numpy import array, dot, random\n",
28 | "import numpy as np\n",
29 | "import matplotlib.pyplot as plt\n",
30 | "import matplotlib"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {
37 | "collapsed": false
38 | },
39 | "outputs": [],
40 | "source": [
41 | "activation = lambda x: 0 if x < 0 else 1\n",
42 | "\n",
43 | "training_data = np.array([\n",
44 | " [0,0,1],\n",
45 | " [1,0,1],\n",
46 | " [0,1,1],\n",
47 | " [1,1,1]\n",
48 | "])\n",
49 | "target_data = np.array([\n",
50 | " 0, \n",
51 | " 1, \n",
52 | " 1, \n",
53 | " 1 \n",
54 | "])\n",
55 | "w = random.rand(3)\n",
56 | "#learning rate\n",
57 | "eta = 0.2\n",
58 | "n = 100\n",
59 | "\n",
60 | "#loop training\n",
61 | "for i in range(n):\n",
62 | " train_index = choice(range(0,4))\n",
63 | " x, expected = (training_data[train_index],target_data[train_index])\n",
64 | " #feed forward\n",
65 | " result = dot(x, w)\n",
66 | " result = activation(result)\n",
67 | " #error estimation\n",
68 | " error = expected - result\n",
69 | " #back prob\n",
70 | " w += eta * error * x\n",
71 | "#end loop\n",
72 | "\n",
73 | "print(w)\n",
74 | "\n",
75 | "for x in training_data:\n",
76 | " result = dot(x, w)\n",
77 | " print(\"{}: {} -> {}\".format(x[:2], result, activation(result)))"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "collapsed": false
85 | },
86 | "outputs": [],
87 | "source": [
88 | "import utils\n",
89 | "import json\n",
90 | "plt = utils.plot_logic(plt,training_data,target_data)\n",
91 | "plt = utils.plot_space(plt,w)\n",
92 | "plt.show()\n"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "metadata": {},
98 | "source": [
99 | "Learning Rate is jump step
\n",
100 | "
"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "metadata": {},
106 | "source": [
107 | "Let Try : Others function
"
108 | ]
109 | },
110 | {
111 | "cell_type": "markdown",
112 | "metadata": {},
113 | "source": [
114 | "1. AND\n",
115 | "2. OR\n",
116 | "3. XOR\n",
117 | "
\n",
118 | "
"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "Next : Softmax and realistic dataset
"
126 | ]
127 | }
128 | ],
129 | "metadata": {
130 | "kernelspec": {
131 | "display_name": "Tensorflow 3",
132 | "language": "python",
133 | "name": "tensorflow"
134 | },
135 | "language_info": {
136 | "codemirror_mode": {
137 | "name": "ipython",
138 | "version": 3
139 | },
140 | "file_extension": ".py",
141 | "mimetype": "text/x-python",
142 | "name": "python",
143 | "nbconvert_exporter": "python",
144 | "pygments_lexer": "ipython3",
145 | "version": "3.5.3"
146 | }
147 | },
148 | "nbformat": 4,
149 | "nbformat_minor": 2
150 | }
151 |
--------------------------------------------------------------------------------
/Lab03_Perceptron/.ipynb_checkpoints/Lab4_Perceptron_and_Backprob-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "Basic Knowledge
"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "1. Perceptron\n",
15 | "
"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {
22 | "collapsed": false
23 | },
24 | "outputs": [],
25 | "source": [
26 | "from random import choice\n",
27 | "from numpy import array, dot, random\n",
28 | "import numpy as np\n",
29 | "import matplotlib.pyplot as plt\n",
30 | "import matplotlib"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {
37 | "collapsed": false
38 | },
39 | "outputs": [],
40 | "source": [
41 | "activation = lambda x: 0 if x < 0 else 1\n",
42 | "\n",
43 | "training_data = np.array([\n",
44 | " [0,0,1],\n",
45 | " [1,0,1],\n",
46 | " [0,1,1],\n",
47 | " [1,1,1]\n",
48 | "])\n",
49 | "target_data = np.array([\n",
50 | " 0, \n",
51 | " 1, \n",
52 | " 1, \n",
53 | " 1 \n",
54 | "])\n",
55 | "w = random.rand(3)\n",
56 | "#learning rate\n",
57 | "eta = 0.2\n",
58 | "n = 100\n",
59 | "\n",
60 | "#loop training\n",
61 | "for i in range(n):\n",
62 | " train_index = choice(range(0,4))\n",
63 | " x, expected = (training_data[train_index],target_data[train_index])\n",
64 | " #feed forward\n",
65 | " result = dot(x, w)\n",
66 | " result = activation(result)\n",
67 | " #error estimation\n",
68 | " error = expected - result\n",
69 | " #back prob\n",
70 | " w += eta * error * x\n",
71 | "#end loop\n",
72 | "\n",
73 | "print(w)\n",
74 | "\n",
75 | "for x in training_data:\n",
76 | " result = dot(x, w)\n",
77 | " print(\"{}: {} -> {}\".format(x[:2], result, activation(result)))"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "collapsed": false
85 | },
86 | "outputs": [],
87 | "source": [
88 | "import utils\n",
89 | "import json\n",
90 | "plt = utils.plot_logic(plt,training_data,target_data)\n",
91 | "plt = utils.plot_space(plt,w)\n",
92 | "plt.show()\n"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "metadata": {},
98 | "source": [
99 | "Let Try : Others function
"
100 | ]
101 | },
102 | {
103 | "cell_type": "markdown",
104 | "metadata": {},
105 | "source": [
106 | "1. AND\n",
107 | "2. OR\n",
108 | "3. XOR\n",
109 | "
\n",
110 | "
"
111 | ]
112 | },
113 | {
114 | "cell_type": "markdown",
115 | "metadata": {},
116 | "source": [
117 | "Next : Softmax and realistic dataset
"
118 | ]
119 | }
120 | ],
121 | "metadata": {
122 | "kernelspec": {
123 | "display_name": "Tensorflow 3",
124 | "language": "python",
125 | "name": "tensorflow"
126 | },
127 | "language_info": {
128 | "codemirror_mode": {
129 | "name": "ipython",
130 | "version": 3
131 | },
132 | "file_extension": ".py",
133 | "mimetype": "text/x-python",
134 | "name": "python",
135 | "nbconvert_exporter": "python",
136 | "pygments_lexer": "ipython3",
137 | "version": "3.5.3"
138 | }
139 | },
140 | "nbformat": 4,
141 | "nbformat_minor": 2
142 | }
143 |
--------------------------------------------------------------------------------
/Lab03_Perceptron/538.json:
--------------------------------------------------------------------------------
1 | {
2 | "lines.linewidth": 1.0,
3 | "patch.linewidth": 0.5,
4 | "legend.fancybox": true,
5 | "axes.facecolor": "#fcfcfc",
6 | "axes.labelsize": "large",
7 | "axes.axisbelow": true,
8 | "axes.grid": true,
9 | "patch.edgecolor": "#fcfcfc",
10 | "axes.titlesize": "x-large",
11 | "examples.directory": "",
12 | "figure.facecolor": "#fcfcfc",
13 | "grid.linewidth": 1.0,
14 | "grid.color": "#808080",
15 | "axes.edgecolor":"#000000",
16 | "xtick.major.size": 0,
17 | "xtick.minor.size": 0,
18 | "ytick.major.size": 0,
19 | "ytick.minor.size": 0,
20 | "axes.linewidth": 2.0,
21 | "font.size":14.0,
22 | "lines.linewidth": 2,
23 | "savefig.edgecolor": "#fcfcfc",
24 | "savefig.facecolor": "#fcfcfc",
25 | "figure.subplot.left" : 0.08,
26 | "figure.subplot.right" : 0.95,
27 | "figure.subplot.bottom" : 0.07
28 | }
--------------------------------------------------------------------------------
/Lab03_Perceptron/Lab03_Perceptron_and_Backprob.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "Basic Knowledge"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "1. Perceptron\n",
15 | ""
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {
22 | "collapsed": false
23 | },
24 | "outputs": [],
25 | "source": [
26 | "from random import choice\n",
27 | "from numpy import array, dot, random\n",
28 | "import numpy as np\n",
29 | "import matplotlib.pyplot as plt\n",
30 | "import matplotlib"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {
37 | "collapsed": false
38 | },
39 | "outputs": [],
40 | "source": [
41 | "activation = lambda x: 0 if x < 0 else 1\n",
42 | "\n",
43 | "training_data = np.array([\n",
44 | " [0,0,1],\n",
45 | " [1,0,1],\n",
46 | " [0,1,1],\n",
47 | " [1,1,1]\n",
48 | "])\n",
49 | "target_data = np.array([\n",
50 | " 0, \n",
51 | " 1, \n",
52 | " 1, \n",
53 | " 1 \n",
54 | "])\n",
55 | "w = random.rand(3)\n",
56 | "#learning rate\n",
57 | "eta = 0.2\n",
58 | "n = 100\n",
59 | "\n",
60 | "#loop training\n",
61 | "for i in range(n):\n",
62 | " train_index = choice(range(0,4))\n",
63 | " x, expected = (training_data[train_index],target_data[train_index])\n",
64 | " #feed forward\n",
65 | " result = dot(x, w)\n",
66 | " result = activation(result)\n",
67 | " #error estimation\n",
68 | " error = expected - result\n",
69 | " #back prob\n",
70 | " w += eta * error * x\n",
71 | "#end loop\n",
72 | "\n",
73 | "print(w)\n",
74 | "\n",
75 | "for x in training_data:\n",
76 | " result = dot(x, w)\n",
77 | " print(\"{}: {} -> {}\".format(x[:2], result, activation(result)))"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "collapsed": false
85 | },
86 | "outputs": [],
87 | "source": [
88 | "import utils\n",
89 | "import json\n",
90 | "plt = utils.plot_logic(plt,training_data,target_data)\n",
91 | "plt = utils.plot_space(plt,w)\n",
92 | "plt.show()\n"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "metadata": {},
98 | "source": [
99 | "Learning rate controls the jump step size\n",
100 | ""
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "metadata": {},
106 | "source": [
107 | "Let's try: other functions"
108 | ]
109 | },
110 | {
111 | "cell_type": "markdown",
112 | "metadata": {},
113 | "source": [
114 | "1. AND\n",
115 | "2. OR\n",
116 | "3. XOR\n",
117 | "\n",
118 | ""
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "Next: Softmax and a realistic dataset"
126 | ]
127 | }
128 | ],
129 | "metadata": {
130 | "kernelspec": {
131 | "display_name": "Tensorflow 3",
132 | "language": "python",
133 | "name": "tensorflow"
134 | },
135 | "language_info": {
136 | "codemirror_mode": {
137 | "name": "ipython",
138 | "version": 3
139 | },
140 | "file_extension": ".py",
141 | "mimetype": "text/x-python",
142 | "name": "python",
143 | "nbconvert_exporter": "python",
144 | "pygments_lexer": "ipython3",
145 | "version": "3.5.3"
146 | }
147 | },
148 | "nbformat": 4,
149 | "nbformat_minor": 2
150 | }
151 |
--------------------------------------------------------------------------------
/Lab03_Perceptron/__pycache__/utils.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab03_Perceptron/__pycache__/utils.cpython-35.pyc
--------------------------------------------------------------------------------
/Lab03_Perceptron/images/1.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab03_Perceptron/images/1.JPG
--------------------------------------------------------------------------------
/Lab03_Perceptron/images/logic_table.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab03_Perceptron/images/logic_table.gif
--------------------------------------------------------------------------------
/Lab03_Perceptron/images/lr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab03_Perceptron/images/lr.png
--------------------------------------------------------------------------------
/Lab03_Perceptron/images/xor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab03_Perceptron/images/xor.png
--------------------------------------------------------------------------------
/Lab03_Perceptron/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from matplotlib.colors import LinearSegmentedColormap
3 | import json
4 | import matplotlib
5 |
6 |
def plot_training_group(plt, X, indices, color, edgecolor):
    """Draw the selected training samples as large filled circles.

    plt is the pyplot module (or any object exposing ``plot``); X holds one
    sample per row with the point coordinates in its first two columns;
    ``indices`` selects which rows to draw; ``color``/``edgecolor`` set the
    marker fill and outline colours.
    """
    xs, ys = X[indices, 0], X[indices, 1]
    plt.plot(xs, ys, 'o',
             markersize=15.0,
             color=color,
             markeredgecolor=edgecolor,
             markeredgewidth=2.0,
             zorder=10)  # keep the samples above the decision-region scatter
17 |
def decision_colorbar():
    """Build the red/green colormap used to shade the decision regions.

    Values below the 0.5 decision boundary map to a light red, values above
    it to a light green, with a narrow white band (decision_c) of width
    2 * dec_delta straddling the boundary itself.
    """
    dec_boundary = 0.5
    dec_delta = 0.05
    boundary_low = dec_boundary - dec_delta
    boundary_high = dec_boundary + dec_delta

    decision_c = 1.000  # colour of the thin band at the boundary (white)

    # Per-channel (negative-side, positive-side) intensities:
    # light red  = (1.000, 0.859, 0.859), light green = (0.941, 1.000, 0.839)
    channel_endpoints = {
        'red':   (1.000, 0.941),
        'green': (0.859, 1.000),
        'blue':  (0.859, 0.839),
    }

    c_dict = {}
    for channel, (negative, positive) in channel_endpoints.items():
        c_dict[channel] = ((0.0, negative, negative),
                           (boundary_low, negative, negative),
                           (dec_boundary, decision_c, decision_c),
                           (boundary_high, positive, positive),
                           (1.0, positive, positive))

    return LinearSegmentedColormap('XOR Green Red', c_dict)
52 |
53 |
def plot_data_group(ax, X, Z, marker, markersize):
    """Scatter data points on ``ax`` coloured through the decision colormap.

    ax         : matplotlib axes to draw on.
    X          : array of points, coordinates in columns 0 and 1.
    Z          : per-point scalar mapped through decision_colorbar().
    marker     : marker style for the scatter points.
    markersize : marker area, forwarded to ``Axes.scatter`` as ``s``.
    """
    plot_x = X[:, 0]
    plot_y = X[:, 1]

    colorbar = decision_colorbar()

    # Bug fix: ``marker`` and ``markersize`` were previously ignored
    # (marker hard-coded to 'o', size left at the scatter default).
    ax.scatter(plot_x, plot_y, marker=marker, s=markersize, c=Z,
               linewidths=0, cmap=colorbar, zorder=-1)
63 |
def plot_logic(plt, training_data, target_data):
    """Plot a 2-input logic table: label-0 samples in red, label-1 in green.

    training_data rows are [x, y, bias]; the trailing bias column is dropped
    before plotting.  target_data holds the 0/1 labels.  Returns ``plt`` so
    callers can chain further plotting onto the same figure.
    """
    # Apply the shared FiveThirtyEight-style theme.  Use a context manager
    # so the style-file handle is closed (the original leaked it via
    # ``json.load(open(...))``).
    with open("538.json") as style_file:
        style = json.load(style_file)
    matplotlib.rcParams.update(style)

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title("Logic Table")

    # Training colors (fill, outline) for each class.
    red = '#FF6B6B'
    green = '#AFE650'
    lightred = '#FFDBDB'
    lightgreen = '#F0FFD6'

    # Plot training 0's (label < 0.5).
    indices = np.where(np.array(target_data) < 0.5)
    plot_training_group(plt, training_data[:, :-1], indices, red, lightred)

    # Plot training 1's (label > 0.5).
    indices = np.where(np.array(target_data) > 0.5)
    plot_training_group(plt, training_data[:, :-1], indices, green, lightgreen)

    # Hide the default axes box; explicit axis lines are drawn below.
    for side in ('right', 'top', 'left', 'bottom'):
        ax.spines[side].set_color('none')

    # Fix the view slightly beyond the unit square.
    ax.set_ylim([-0.1, 1.1])
    ax.set_xlim([-0.1, 1.1])

    # Draw explicit x and y axis lines through the origin.
    ax.plot([-0.1, 1.1], [0, 0], color='#000000', linewidth=1.0)
    ax.plot([0, 0], [-0.1, 1.1], color='#000000', linewidth=1.0)

    plt.gca().set_aspect('equal', adjustable='box')
    return plt
def plot_space(plt, w):
    """Shade the unit square by the perceptron decision w . [x, y, 1] > 0.

    For each x on a 50-point grid (numpy's linspace default), evaluates the
    weighted sum for every y on the same grid, then plots positive
    activations in green and non-positive ones in red.  Returns ``plt`` so
    callers can chain further plotting.
    """
    x = np.linspace(-0.1, 1.1)
    # Hoisted out of the loop: y and bias are invariant across columns
    # (the original rebuilt y on every iteration).
    y = np.linspace(-0.1, 1.1)
    bias = np.ones(len(x))
    for i in range(len(x)):
        # One column of grid points sharing the same x coordinate.
        x_ = np.full(len(x), x[i])
        inp = np.column_stack([x_, y, bias])  # shape (len(x), 3)
        z = np.matmul(inp, w)                 # activation per grid point
        indices = np.where(z > 0)
        plt.plot(x_[indices], y[indices], 'o',
                 markersize=1.0,
                 color='#99e05c')
        indices = np.where(z <= 0)
        plt.plot(x_[indices], y[indices], 'o',
                 markersize=1.0,
                 color='#db7559')
    return plt
--------------------------------------------------------------------------------
/Lab04_Simple_Softmax/.ipynb_checkpoints/04_Simple_Softmax-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import\n",
12 | "from __future__ import division\n",
13 | "from __future__ import print_function\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "import matplotlib.cm as cm\n",
16 | "import matplotlib\n",
17 | "\n",
18 | "%matplotlib inline\n",
19 | "\n",
20 | "import tensorflow as tf\n",
21 | "import numpy as np\n",
22 | "from tensorflow.examples.tutorials.mnist import input_data\n",
23 | "mnist = input_data.read_data_sets(\"../data\", one_hot = True)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "collapsed": false
31 | },
32 | "outputs": [],
33 | "source": [
34 | "index = 10\n",
35 | "tmp = mnist.train.images[index]\n",
36 | "tmp = tmp.reshape((28,28))\n",
37 | "\n",
38 | "plt.imshow(tmp, cmap = cm.Greys)\n",
39 | "plt.show()\n",
40 | "print(\"One-hot Label for this images = \", end=\" \")\n",
41 | "onehot_label = mnist.train.labels[index]\n",
42 | "print(onehot_label)\n",
43 | "print(\"Index = %d\" % np.argmax(onehot_label))"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "
"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {
57 | "collapsed": false
58 | },
59 | "outputs": [],
60 | "source": [
61 | "X = tf.placeholder(tf.float32, [None,784])\n",
62 | "\n",
63 | "W = tf.Variable(tf.zeros([784,10]))\n",
64 | "b = tf.Variable(tf.zeros([10]))"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {
71 | "collapsed": false
72 | },
73 | "outputs": [],
74 | "source": [
75 | "#model\n",
76 | "net = tf.matmul(X, W) + b #logits\n",
77 | "Y = tf.nn.softmax(net)"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "collapsed": true
85 | },
86 | "outputs": [],
87 | "source": [
88 | "# Define loss and optimizer\n",
89 | "Y_ = tf.placeholder(tf.float32, [None, 10])\n",
90 | "#loss function\n",
91 | "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=net))\n",
92 | "\n",
93 | "#Optimizer\n",
94 | "optimizer = tf.train.GradientDescentOptimizer(0.05)\n",
95 | "train_step = optimizer.minimize(cross_entropy)\n",
96 | "#or train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n",
97 | "\n",
98 | "# % of correct answers found in batch\n",
99 | "is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))\n",
100 | "accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "metadata": {},
106 | "source": [
107 | "
\n",
108 | "
\n",
109 | "
"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {
116 | "collapsed": false
117 | },
118 | "outputs": [],
119 | "source": [
120 | "sess = tf.InteractiveSession()\n",
121 | "tf.global_variables_initializer().run()\n",
122 | "\n",
123 | "for i in range(10000):\n",
124 | " #load batch of images and correct answers\n",
125 | " batch_X, batch_Y = mnist.train.next_batch(100)\n",
126 | " train_data={X: batch_X, Y_: batch_Y}\n",
127 | " #train\n",
128 | " sess.run(train_step, feed_dict=train_data)\n",
129 | " if i % 100 == 0:\n",
130 | " #success ?\n",
131 | " a,c = sess.run([accuracy,cross_entropy],feed_dict=train_data)\n",
132 | " print(\"Step : %d acc = %.4f loss = %.4f\" % (i,a,c))\n",
133 | " #--- edit\n",
134 | "#success on test data?\n",
135 | "test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
136 | "a,c = sess.run([accuracy, cross_entropy], feed_dict=test_data)\n",
137 | "print(\"Test data acc = %.4f loss = %.4f\" % (a,c))"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": null,
143 | "metadata": {
144 | "collapsed": false
145 | },
146 | "outputs": [],
147 | "source": [
148 | "print(\"Test image size\")\n",
149 | "print(mnist.test.images.shape)\n",
150 | "\n",
151 | "im_test = mnist.test.images[0].reshape([28,28])\n",
152 | "plt.imshow(im_test, cmap= cm.Greys)\n",
153 | "plt.show()\n",
154 | "#feed test again. By interactive sess we can use eval without sess !! easy!?\n",
155 | "res = net.eval(feed_dict = {X:[mnist.test.images[0]]})\n",
156 | "print(\"Result size : \")\n",
157 | "print(res.shape)\n",
158 | "print(\"Picking up first response \")\n",
159 | "print(res[0])\n",
160 | "print(\"Softmax percentage : \")\n",
161 | "print(tf.nn.softmax(res[0]).eval())\n",
162 | "print(\"Result are : %d\" % np.argmax(res[0]))"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {
169 | "collapsed": false
170 | },
171 | "outputs": [],
172 | "source": [
173 | "wts = W.eval()\n",
174 | "wts.shape\n",
175 | "for i in range(0,10):\n",
176 | " im = wts.flatten()[i::10].reshape((28,-1))\n",
177 | " plt.imshow(im, cmap = cm.Greys)\n",
178 | " plt.show()"
179 | ]
180 | },
181 | {
182 | "cell_type": "code",
183 | "execution_count": null,
184 | "metadata": {
185 | "collapsed": true
186 | },
187 | "outputs": [],
188 | "source": []
189 | }
190 | ],
191 | "metadata": {
192 | "kernelspec": {
193 | "display_name": "Tensorflow 3",
194 | "language": "python",
195 | "name": "tensorflow"
196 | },
197 | "language_info": {
198 | "codemirror_mode": {
199 | "name": "ipython",
200 | "version": 3
201 | },
202 | "file_extension": ".py",
203 | "mimetype": "text/x-python",
204 | "name": "python",
205 | "nbconvert_exporter": "python",
206 | "pygments_lexer": "ipython3",
207 | "version": "3.5.3"
208 | }
209 | },
210 | "nbformat": 4,
211 | "nbformat_minor": 2
212 | }
213 |
--------------------------------------------------------------------------------
/Lab04_Simple_Softmax/.ipynb_checkpoints/05_Simple_Softmax-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import\n",
12 | "from __future__ import division\n",
13 | "from __future__ import print_function\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "import matplotlib.cm as cm\n",
16 | "import matplotlib\n",
17 | "\n",
18 | "%matplotlib inline\n",
19 | "\n",
20 | "import tensorflow as tf\n",
21 | "import numpy as np\n",
22 | "from tensorflow.examples.tutorials.mnist import input_data\n",
23 | "mnist = input_data.read_data_sets(\"data/\", one_hot = True)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "collapsed": false
31 | },
32 | "outputs": [],
33 | "source": [
34 | "index = 10\n",
35 | "tmp = mnist.train.images[index]\n",
36 | "tmp = tmp.reshape((28,28))\n",
37 | "\n",
38 | "plt.imshow(tmp, cmap = cm.Greys)\n",
39 | "plt.show()\n",
40 | "print(\"One-hot Label for this images = \", end=\" \")\n",
41 | "onehot_label = mnist.train.labels[index]\n",
42 | "print(onehot_label)\n",
43 | "print(\"Index = %d\" % np.argmax(onehot_label))"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "<img src=\"images/softmax_regression_en.png\">"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {
57 | "collapsed": false
58 | },
59 | "outputs": [],
60 | "source": [
61 | "X = tf.placeholder(tf.float32, [None,784])\n",
62 | "\n",
63 | "W = tf.Variable(tf.zeros([784,10]))\n",
64 | "b = tf.Variable(tf.zeros([10]))"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {
71 | "collapsed": false
72 | },
73 | "outputs": [],
74 | "source": [
75 | "#model\n",
76 | "net = tf.matmul(X, W) + b #logits\n",
77 | "Y = tf.nn.softmax(net)"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "collapsed": true
85 | },
86 | "outputs": [],
87 | "source": [
88 | "# Define loss and optimizer\n",
89 | "Y_ = tf.placeholder(tf.float32, [None, 10])\n",
90 | "#loss function\n",
91 | "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=net))\n",
92 | "\n",
93 | "#Optimizer\n",
94 | "optimizer = tf.train.GradientDescentOptimizer(0.05)\n",
95 | "train_step = optimizer.minimize(cross_entropy)\n",
96 | "#or train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n",
97 | "\n",
98 | "# % of correct answers found in batch\n",
99 | "is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))\n",
100 | "accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "metadata": {},
106 | "source": [
107 | "<img src=\"images/3.png\">\n",
108 | "<img src=\"images/optimizer1.gif\">\n",
109 | "<img src=\"images/optimizer2.gif\">"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {
116 | "collapsed": false
117 | },
118 | "outputs": [],
119 | "source": [
120 | "sess = tf.InteractiveSession()\n",
121 | "tf.global_variables_initializer().run()\n",
122 | "\n",
123 | "for i in range(10000):\n",
124 | " #load batch of images and correct answers\n",
125 | " batch_X, batch_Y = mnist.train.next_batch(100)\n",
126 | " train_data={X: batch_X, Y_: batch_Y}\n",
127 | " #train\n",
128 | " sess.run(train_step, feed_dict=train_data)\n",
129 | " if i % 100 == 0:\n",
130 | " #success ?\n",
131 | " a,c = sess.run([accuracy,cross_entropy],feed_dict=train_data)\n",
132 | " print(\"Step : %d acc = %.4f loss = %.4f\" % (i,a,c))\n",
133 | " #--- edit\n",
134 | "#success on test data?\n",
135 | "test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
136 | "a,c = sess.run([accuracy, cross_entropy], feed_dict=test_data)\n",
137 | "print(\"Test data acc = %.4f loss = %.4f\" % (a,c))"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": null,
143 | "metadata": {
144 | "collapsed": false
145 | },
146 | "outputs": [],
147 | "source": [
148 | "print(\"Test image size\")\n",
149 | "print(mnist.test.images.shape)\n",
150 | "\n",
151 | "im_test = mnist.test.images[0].reshape([28,28])\n",
152 | "plt.imshow(im_test, cmap= cm.Greys)\n",
153 | "plt.show()\n",
154 | "#feed test again. By interactive sess we can use eval without sess !! easy!?\n",
155 | "res = net.eval(feed_dict = {X:[mnist.test.images[0]]})\n",
156 | "print(\"Result size : \")\n",
157 | "print(res.shape)\n",
158 | "print(\"Picking up first response \")\n",
159 | "print(res[0])\n",
160 | "print(\"Softmax percentage : \")\n",
161 | "print(tf.nn.softmax(res[0]).eval())\n",
162 | "print(\"Result are : %d\" % np.argmax(res[0]))"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {
169 | "collapsed": false
170 | },
171 | "outputs": [],
172 | "source": [
173 | "wts = W.eval()\n",
174 | "wts.shape\n",
175 | "for i in range(0,10):\n",
176 | " im = wts.flatten()[i::10].reshape((28,-1))\n",
177 | " plt.imshow(im, cmap = cm.Greys)\n",
178 | " plt.show()"
179 | ]
180 | },
181 | {
182 | "cell_type": "code",
183 | "execution_count": null,
184 | "metadata": {
185 | "collapsed": true
186 | },
187 | "outputs": [],
188 | "source": []
189 | }
190 | ],
191 | "metadata": {
192 | "kernelspec": {
193 | "display_name": "Tensorflow 3",
194 | "language": "python",
195 | "name": "tensorflow"
196 | },
197 | "language_info": {
198 | "codemirror_mode": {
199 | "name": "ipython",
200 | "version": 3
201 | },
202 | "file_extension": ".py",
203 | "mimetype": "text/x-python",
204 | "name": "python",
205 | "nbconvert_exporter": "python",
206 | "pygments_lexer": "ipython3",
207 | "version": "3.5.3"
208 | }
209 | },
210 | "nbformat": 4,
211 | "nbformat_minor": 2
212 | }
213 |
--------------------------------------------------------------------------------
/Lab04_Simple_Softmax/04_Simple_Softmax.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import\n",
12 | "from __future__ import division\n",
13 | "from __future__ import print_function\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "import matplotlib.cm as cm\n",
16 | "import matplotlib\n",
17 | "\n",
18 | "%matplotlib inline\n",
19 | "\n",
20 | "import tensorflow as tf\n",
21 | "import numpy as np\n",
22 | "from tensorflow.examples.tutorials.mnist import input_data\n",
23 | "mnist = input_data.read_data_sets(\"../data\", one_hot = True)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "collapsed": false
31 | },
32 | "outputs": [],
33 | "source": [
34 | "index = 10\n",
35 | "tmp = mnist.train.images[index]\n",
36 | "tmp = tmp.reshape((28,28))\n",
37 | "\n",
38 | "plt.imshow(tmp, cmap = cm.Greys)\n",
39 | "plt.show()\n",
40 | "print(\"One-hot Label for this images = \", end=\" \")\n",
41 | "onehot_label = mnist.train.labels[index]\n",
42 | "print(onehot_label)\n",
43 | "print(\"Index = %d\" % np.argmax(onehot_label))"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "<img src=\"images/softmax_regression_en.png\">"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {
57 | "collapsed": false
58 | },
59 | "outputs": [],
60 | "source": [
61 | "X = tf.placeholder(tf.float32, [None,784])\n",
62 | "\n",
63 | "W = tf.Variable(tf.zeros([784,10]))\n",
64 | "b = tf.Variable(tf.zeros([10]))"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {
71 | "collapsed": false
72 | },
73 | "outputs": [],
74 | "source": [
75 | "#model\n",
76 | "net = tf.matmul(X, W) + b #logits\n",
77 | "Y = tf.nn.softmax(net)"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "collapsed": true
85 | },
86 | "outputs": [],
87 | "source": [
88 | "# Define loss and optimizer\n",
89 | "Y_ = tf.placeholder(tf.float32, [None, 10])\n",
90 | "#loss function\n",
91 | "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=net))\n",
92 | "\n",
93 | "#Optimizer\n",
94 | "optimizer = tf.train.GradientDescentOptimizer(0.05)\n",
95 | "train_step = optimizer.minimize(cross_entropy)\n",
96 | "#or train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n",
97 | "\n",
98 | "# % of correct answers found in batch\n",
99 | "is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))\n",
100 | "accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))"
101 | ]
102 | },
103 | {
104 | "cell_type": "markdown",
105 | "metadata": {},
106 | "source": [
107 | "<img src=\"images/3.png\">\n",
108 | "<img src=\"images/optimizer1.gif\">\n",
109 | "<img src=\"images/optimizer2.gif\">"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {
116 | "collapsed": false
117 | },
118 | "outputs": [],
119 | "source": [
120 | "sess = tf.InteractiveSession()\n",
121 | "tf.global_variables_initializer().run()\n",
122 | "\n",
123 | "for i in range(10000):\n",
124 | " #load batch of images and correct answers\n",
125 | " batch_X, batch_Y = mnist.train.next_batch(100)\n",
126 | " train_data={X: batch_X, Y_: batch_Y}\n",
127 | " #train\n",
128 | " sess.run(train_step, feed_dict=train_data)\n",
129 | " if i % 100 == 0:\n",
130 | " #success ?\n",
131 | " a,c = sess.run([accuracy,cross_entropy],feed_dict=train_data)\n",
132 | " print(\"Step : %d acc = %.4f loss = %.4f\" % (i,a,c))\n",
133 | " #--- edit\n",
134 | "#success on test data?\n",
135 | "test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
136 | "a,c = sess.run([accuracy, cross_entropy], feed_dict=test_data)\n",
137 | "print(\"Test data acc = %.4f loss = %.4f\" % (a,c))"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": null,
143 | "metadata": {
144 | "collapsed": false
145 | },
146 | "outputs": [],
147 | "source": [
148 | "print(\"Test image size\")\n",
149 | "print(mnist.test.images.shape)\n",
150 | "\n",
151 | "im_test = mnist.test.images[0].reshape([28,28])\n",
152 | "plt.imshow(im_test, cmap= cm.Greys)\n",
153 | "plt.show()\n",
154 | "#feed test again. By interactive sess we can use eval without sess !! easy!?\n",
155 | "res = net.eval(feed_dict = {X:[mnist.test.images[0]]})\n",
156 | "print(\"Result size : \")\n",
157 | "print(res.shape)\n",
158 | "print(\"Picking up first response \")\n",
159 | "print(res[0])\n",
160 | "print(\"Softmax percentage : \")\n",
161 | "print(tf.nn.softmax(res[0]).eval())\n",
162 | "print(\"Result are : %d\" % np.argmax(res[0]))"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {
169 | "collapsed": false
170 | },
171 | "outputs": [],
172 | "source": [
173 | "wts = W.eval()\n",
174 | "wts.shape\n",
175 | "for i in range(0,10):\n",
176 | " im = wts.flatten()[i::10].reshape((28,-1))\n",
177 | " plt.imshow(im, cmap = cm.Greys)\n",
178 | " plt.show()"
179 | ]
180 | },
181 | {
182 | "cell_type": "code",
183 | "execution_count": null,
184 | "metadata": {
185 | "collapsed": true
186 | },
187 | "outputs": [],
188 | "source": []
189 | }
190 | ],
191 | "metadata": {
192 | "kernelspec": {
193 | "display_name": "Tensorflow 3",
194 | "language": "python",
195 | "name": "tensorflow"
196 | },
197 | "language_info": {
198 | "codemirror_mode": {
199 | "name": "ipython",
200 | "version": 3
201 | },
202 | "file_extension": ".py",
203 | "mimetype": "text/x-python",
204 | "name": "python",
205 | "nbconvert_exporter": "python",
206 | "pygments_lexer": "ipython3",
207 | "version": "3.5.3"
208 | }
209 | },
210 | "nbformat": 4,
211 | "nbformat_minor": 2
212 | }
213 |
--------------------------------------------------------------------------------
/Lab04_Simple_Softmax/images/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab04_Simple_Softmax/images/3.png
--------------------------------------------------------------------------------
/Lab04_Simple_Softmax/images/optimizer1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab04_Simple_Softmax/images/optimizer1.gif
--------------------------------------------------------------------------------
/Lab04_Simple_Softmax/images/optimizer2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab04_Simple_Softmax/images/optimizer2.gif
--------------------------------------------------------------------------------
/Lab04_Simple_Softmax/images/softmax_regression_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab04_Simple_Softmax/images/softmax_regression_en.png
--------------------------------------------------------------------------------
/Lab05_Multi_Layer_Percentron/.ipynb_checkpoints/Lab05_Multilayer_Perceptron-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import\n",
12 | "from __future__ import division\n",
13 | "from __future__ import print_function\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "import matplotlib.cm as cm\n",
16 | "import matplotlib\n",
17 | "\n",
18 | "%matplotlib inline\n",
19 | "\n",
20 | "import tensorflow as tf\n",
21 | "import numpy as np\n",
22 | "from tensorflow.examples.tutorials.mnist import input_data\n",
23 | "mnist = input_data.read_data_sets(\"../data/\", one_hot = True)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "collapsed": true
31 | },
32 | "outputs": [],
33 | "source": [
34 | "X = tf.placeholder(tf.float32, [None,784])\n",
35 | "#hidden\n",
36 | "hidden = 512\n",
37 | "W1 = tf.Variable(tf.truncated_normal([784,hidden],stddev=0.1))\n",
38 | "b1 = tf.Variable(tf.zeros([hidden]))\n",
39 | "#out layer\n",
40 | "W2 = tf.Variable(tf.truncated_normal([hidden,10],stddev=0.1))\n",
41 | "b2 = tf.Variable(tf.zeros([10]))"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {
48 | "collapsed": false
49 | },
50 | "outputs": [],
51 | "source": [
52 | "#model\n",
53 | "net = tf.nn.sigmoid(tf.matmul(X, W1) + b1)\n",
54 | "net1 = tf.matmul(net, W2) + b2\n",
55 | "Y = tf.nn.softmax(net1)\n",
56 | "Y_ = tf.placeholder(tf.float32, [None, 10])"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {
63 | "collapsed": false
64 | },
65 | "outputs": [],
66 | "source": [
67 | "#loss function\n",
68 | "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=net1))\n",
69 | "\n",
70 | "# % of correct answers found in batch\n",
71 | "is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))\n",
72 | "accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n",
73 | "\n",
74 | "# Create a summary to monitor cost tensor\n",
75 | "tf.summary.scalar('loss', cross_entropy)\n",
76 | "# Create a summary to monitor accuracy tensor\n",
77 | "tf.summary.scalar('accuracy', accuracy)\n",
78 | "#for weight\n",
79 | "with tf.name_scope('Weights'):\n",
80 | " tf.summary.histogram(\"weight1\", W1)\n",
81 | " tf.summary.histogram(\"weight2\", W2)\n",
82 | " tf.summary.histogram(\"bias_1\", b1)\n",
83 | " tf.summary.histogram(\"bias_2\", b2)\n",
84 | "\n",
85 | "summary_op = tf.summary.merge_all()\n",
86 | "\n",
87 | "optimizer = tf.train.AdamOptimizer()\n",
88 | "train_step = optimizer.minimize(cross_entropy)"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {
95 | "collapsed": false
96 | },
97 | "outputs": [],
98 | "source": [
99 | "#create session\n",
100 | "sess = tf.InteractiveSession()\n",
101 | "tf.global_variables_initializer().run()\n",
102 | "\n",
103 | "#create summary op to write logs to Tensorboard\n",
104 | "train_summary_writer = tf.summary.FileWriter('../logs/mlp_train', graph=sess.graph)\n",
105 | "test_summary_writer = tf.summary.FileWriter('../logs/mlp_test', graph=sess.graph)\n",
106 | "\n",
107 | "for i in range(10000):\n",
108 | " #load batch of images and correct answers\n",
109 | " batch_X, batch_Y = mnist.train.next_batch(100)\n",
110 | " train_data={X: batch_X, Y_: batch_Y}\n",
111 | " #train \n",
112 | " _,summary = sess.run([train_step,summary_op], feed_dict=train_data)\n",
113 | " # Write logs at every iteration\n",
114 | " train_summary_writer.add_summary(summary,i)\n",
115 | " if i % 100 == 0: \n",
116 | " #success ?\n",
117 | " ta,tc = sess.run([accuracy,cross_entropy],feed_dict=train_data)\n",
118 | " test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
119 | " va,vc,summary_test = sess.run([accuracy,cross_entropy,summary_op],feed_dict=test_data)\n",
120 | " test_summary_writer.add_summary(summary_test,i)\n",
121 | " print(\"Step : %d Batch : acc = %.4f loss = %.4f | Test acc = %.4f loss = %.4f\" % (i,ta,tc,va,vc))\n",
122 | " \n",
123 | " #--- edit\n",
124 | "#success on test data?\n",
125 | "test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
126 | "a,c = sess.run([accuracy, cross_entropy], feed_dict=test_data)\n",
127 | "print(\"Test data acc = %.4f loss = %.4f\" % (a,c))"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": null,
133 | "metadata": {
134 | "collapsed": false
135 | },
136 | "outputs": [],
137 | "source": [
138 | "print(\"Test image size\")\n",
139 | "print(mnist.test.images.shape)\n",
140 | "\n",
141 | "im_test = mnist.test.images[0].reshape([28,28])\n",
142 | "plt.imshow(im_test, cmap= cm.Greys)\n",
143 | "plt.show()\n",
144 | "#feed test again. By interactive sess we can use eval without sess !! easy!?\n",
145 | "res = Y.eval(feed_dict = test_data)\n",
146 | "print(\"Result size : \")\n",
147 | "print(res.shape)\n",
148 | "print(\"Picking up first response \")\n",
149 | "print(res[0])\n",
150 | "print(\"Softmax percentage : \")\n",
151 | "print(tf.nn.softmax(res[0]).eval())\n",
152 | "print(\"Result are : %d\" % np.argmax(res[0]))"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "metadata": {
159 | "collapsed": false
160 | },
161 | "outputs": [],
162 | "source": [
163 | "wts = W1.eval()\n",
164 | "wts.shape\n",
165 | "for i in range(0,10):\n",
166 | " im = wts.flatten()[i::512].reshape((28,-1))\n",
167 | " plt.imshow(im, cmap = cm.Greys)\n",
168 | " plt.show()"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "metadata": {
175 | "collapsed": false
176 | },
177 | "outputs": [],
178 | "source": [
179 | "wts = W2.eval()\n",
180 | "im = wts.flatten()[i::10].reshape([16,32])\n",
181 | "plt.imshow(im, cmap = cm.Greys)\n",
182 | "plt.show()"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "metadata": {
189 | "collapsed": true
190 | },
191 | "outputs": [],
192 | "source": []
193 | }
194 | ],
195 | "metadata": {
196 | "kernelspec": {
197 | "display_name": "Tensorflow 3",
198 | "language": "python",
199 | "name": "tensorflow"
200 | },
201 | "language_info": {
202 | "codemirror_mode": {
203 | "name": "ipython",
204 | "version": 3
205 | },
206 | "file_extension": ".py",
207 | "mimetype": "text/x-python",
208 | "name": "python",
209 | "nbconvert_exporter": "python",
210 | "pygments_lexer": "ipython3",
211 | "version": "3.5.3"
212 | }
213 | },
214 | "nbformat": 4,
215 | "nbformat_minor": 2
216 | }
217 |
--------------------------------------------------------------------------------
/Lab05_Multi_Layer_Percentron/.ipynb_checkpoints/Lab6_Multilayer_Perceptron-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import\n",
12 | "from __future__ import division\n",
13 | "from __future__ import print_function\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "import matplotlib.cm as cm\n",
16 | "import matplotlib\n",
17 | "\n",
18 | "%matplotlib inline\n",
19 | "\n",
20 | "import tensorflow as tf\n",
21 | "import numpy as np\n",
22 | "from tensorflow.examples.tutorials.mnist import input_data\n",
23 | "mnist = input_data.read_data_sets(\"data/\", one_hot = True)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "collapsed": true
31 | },
32 | "outputs": [],
33 | "source": [
34 | "X = tf.placeholder(tf.float32, [None,784])\n",
35 | "#hidden\n",
36 | "hidden = 512\n",
37 | "W1 = tf.Variable(tf.truncated_normal([784,hidden],stddev=0.1))\n",
38 | "b1 = tf.Variable(tf.zeros([hidden]))\n",
39 | "#out layer\n",
40 | "W2 = tf.Variable(tf.truncated_normal([hidden,10],stddev=0.1))\n",
41 | "b2 = tf.Variable(tf.zeros([10]))"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {
48 | "collapsed": false
49 | },
50 | "outputs": [],
51 | "source": [
52 | "#model\n",
53 | "net = tf.nn.sigmoid(tf.matmul(X, W1) + b1)\n",
54 | "net1 = tf.matmul(net, W2) + b2\n",
55 | "Y = tf.nn.softmax(net1)\n",
56 | "Y_ = tf.placeholder(tf.float32, [None, 10])"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {
63 | "collapsed": false
64 | },
65 | "outputs": [],
66 | "source": [
67 | "#loss function\n",
68 | "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=net1))\n",
69 | "\n",
70 | "# % of correct answers found in batch\n",
71 | "is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))\n",
72 | "accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n",
73 | "\n",
74 | "# Create a summary to monitor cost tensor\n",
75 | "tf.summary.scalar('loss', cross_entropy)\n",
76 | "# Create a summary to monitor accuracy tensor\n",
77 | "tf.summary.scalar('accuracy', accuracy)\n",
78 | "#for weight\n",
79 | "with tf.name_scope('Weights'):\n",
80 | " tf.summary.histogram(\"weight1\", W1)\n",
81 | " tf.summary.histogram(\"weight2\", W2)\n",
82 | " tf.summary.histogram(\"bias_1\", b1)\n",
83 | " tf.summary.histogram(\"bias_2\", b2)\n",
84 | "\n",
85 | "summary_op = tf.summary.merge_all()\n",
86 | "\n",
87 | "optimizer = tf.train.AdamOptimizer()\n",
88 | "train_step = optimizer.minimize(cross_entropy)"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {
95 | "collapsed": false
96 | },
97 | "outputs": [],
98 | "source": [
99 | "#create session\n",
100 | "sess = tf.InteractiveSession()\n",
101 | "tf.global_variables_initializer().run()\n",
102 | "\n",
103 | "#create summary op to write logs to Tensorboard\n",
104 | "train_summary_writer = tf.summary.FileWriter('logs/mlp_train', graph=sess.graph)\n",
105 | "test_summary_writer = tf.summary.FileWriter('logs/mlp_test', graph=sess.graph)\n",
106 | "\n",
107 | "for i in range(10000):\n",
108 | " #load batch of images and correct answers\n",
109 | " batch_X, batch_Y = mnist.train.next_batch(100)\n",
110 | " train_data={X: batch_X, Y_: batch_Y}\n",
111 | " #train \n",
112 | " _,summary = sess.run([train_step,summary_op], feed_dict=train_data)\n",
113 | " # Write logs at every iteration\n",
114 | " train_summary_writer.add_summary(summary,i)\n",
115 | " if i % 100 == 0: \n",
116 | " #success ?\n",
117 | " ta,tc = sess.run([accuracy,cross_entropy],feed_dict=train_data)\n",
118 | " test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
119 | " va,vc,summary_test = sess.run([accuracy,cross_entropy,summary_op],feed_dict=test_data)\n",
120 | " test_summary_writer.add_summary(summary_test,i)\n",
121 | " print(\"Step : %d Batch : acc = %.4f loss = %.4f | Test acc = %.4f loss = %.4f\" % (i,ta,tc,va,vc))\n",
122 | " \n",
123 | " #--- edit\n",
124 | "#success on test data?\n",
125 | "test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
126 | "a,c = sess.run([accuracy, cross_entropy], feed_dict=test_data)\n",
127 | "print(\"Test data acc = %.4f loss = %.4f\" % (a,c))"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": null,
133 | "metadata": {
134 | "collapsed": false
135 | },
136 | "outputs": [],
137 | "source": [
138 | "print(\"Test image size\")\n",
139 | "print(mnist.test.images.shape)\n",
140 | "\n",
141 | "im_test = mnist.test.images[0].reshape([28,28])\n",
142 | "plt.imshow(im_test, cmap= cm.Greys)\n",
143 | "plt.show()\n",
144 | "#feed test again. By interactive sess we can use eval without sess !! easy!?\n",
145 | "res = Y.eval(feed_dict = test_data)\n",
146 | "print(\"Result size : \")\n",
147 | "print(res.shape)\n",
148 | "print(\"Picking up first response \")\n",
149 | "print(res[0])\n",
150 | "print(\"Softmax percentage : \")\n",
151 | "print(tf.nn.softmax(res[0]).eval())\n",
152 | "print(\"Result are : %d\" % np.argmax(res[0]))"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "metadata": {
159 | "collapsed": false
160 | },
161 | "outputs": [],
162 | "source": [
163 | "wts = W1.eval()\n",
164 | "wts.shape\n",
165 | "for i in range(0,10):\n",
166 | " im = wts.flatten()[i::512].reshape((28,-1))\n",
167 | " plt.imshow(im, cmap = cm.Greys)\n",
168 | " plt.show()"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "metadata": {
175 | "collapsed": false
176 | },
177 | "outputs": [],
178 | "source": [
179 | "wts = W2.eval()\n",
180 | "im = wts.flatten()[i::10].reshape([16,32])\n",
181 | "plt.imshow(im, cmap = cm.Greys)\n",
182 | "plt.show()"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "metadata": {
189 | "collapsed": true
190 | },
191 | "outputs": [],
192 | "source": []
193 | }
194 | ],
195 | "metadata": {
196 | "kernelspec": {
197 | "display_name": "Tensorflow 3",
198 | "language": "python",
199 | "name": "tensorflow"
200 | },
201 | "language_info": {
202 | "codemirror_mode": {
203 | "name": "ipython",
204 | "version": 3
205 | },
206 | "file_extension": ".py",
207 | "mimetype": "text/x-python",
208 | "name": "python",
209 | "nbconvert_exporter": "python",
210 | "pygments_lexer": "ipython3",
211 | "version": "3.5.3"
212 | }
213 | },
214 | "nbformat": 4,
215 | "nbformat_minor": 2
216 | }
217 |
--------------------------------------------------------------------------------
/Lab05_Multi_Layer_Percentron/Lab05_Multilayer_Perceptron.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import\n",
12 | "from __future__ import division\n",
13 | "from __future__ import print_function\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "import matplotlib.cm as cm\n",
16 | "import matplotlib\n",
17 | "\n",
18 | "%matplotlib inline\n",
19 | "\n",
20 | "import tensorflow as tf\n",
21 | "import numpy as np\n",
22 | "from tensorflow.examples.tutorials.mnist import input_data\n",
23 | "mnist = input_data.read_data_sets(\"../data/\", one_hot = True)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "collapsed": true
31 | },
32 | "outputs": [],
33 | "source": [
34 | "X = tf.placeholder(tf.float32, [None,784])\n",
35 | "#hidden\n",
36 | "hidden = 512\n",
37 | "W1 = tf.Variable(tf.truncated_normal([784,hidden],stddev=0.1))\n",
38 | "b1 = tf.Variable(tf.zeros([hidden]))\n",
39 | "#out layer\n",
40 | "W2 = tf.Variable(tf.truncated_normal([hidden,10],stddev=0.1))\n",
41 | "b2 = tf.Variable(tf.zeros([10]))"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {
48 | "collapsed": false
49 | },
50 | "outputs": [],
51 | "source": [
52 | "#model\n",
53 | "net = tf.nn.sigmoid(tf.matmul(X, W1) + b1)\n",
54 | "net1 = tf.matmul(net, W2) + b2\n",
55 | "Y = tf.nn.softmax(net1)\n",
56 | "Y_ = tf.placeholder(tf.float32, [None, 10])"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {
63 | "collapsed": false
64 | },
65 | "outputs": [],
66 | "source": [
67 | "#loss function\n",
68 | "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=net1))\n",
69 | "\n",
70 | "# % of correct answers found in batch\n",
71 | "is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))\n",
72 | "accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n",
73 | "\n",
74 | "# Create a summary to monitor cost tensor\n",
75 | "tf.summary.scalar('loss', cross_entropy)\n",
76 | "# Create a summary to monitor accuracy tensor\n",
77 | "tf.summary.scalar('accuracy', accuracy)\n",
78 | "#for weight\n",
79 | "with tf.name_scope('Weights'):\n",
80 | " tf.summary.histogram(\"weight1\", W1)\n",
81 | " tf.summary.histogram(\"weight2\", W2)\n",
82 | " tf.summary.histogram(\"bias_1\", b1)\n",
83 | " tf.summary.histogram(\"bias_2\", b2)\n",
84 | "\n",
85 | "summary_op = tf.summary.merge_all()\n",
86 | "\n",
87 | "optimizer = tf.train.AdamOptimizer()\n",
88 | "train_step = optimizer.minimize(cross_entropy)"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {
95 | "collapsed": false
96 | },
97 | "outputs": [],
98 | "source": [
99 | "#create session\n",
100 | "sess = tf.InteractiveSession()\n",
101 | "tf.global_variables_initializer().run()\n",
102 | "\n",
103 | "#create summary op to write logs to Tensorboard\n",
104 | "train_summary_writer = tf.summary.FileWriter('../logs/mlp_train', graph=sess.graph)\n",
105 | "test_summary_writer = tf.summary.FileWriter('../logs/mlp_test', graph=sess.graph)\n",
106 | "\n",
107 | "for i in range(10000):\n",
108 | " #load batch of images and correct answers\n",
109 | " batch_X, batch_Y = mnist.train.next_batch(100)\n",
110 | " train_data={X: batch_X, Y_: batch_Y}\n",
111 | " #train \n",
112 | " _,summary = sess.run([train_step,summary_op], feed_dict=train_data)\n",
113 | " # Write logs at every iteration\n",
114 | " train_summary_writer.add_summary(summary,i)\n",
115 | " if i % 100 == 0: \n",
116 | " #success ?\n",
117 | " ta,tc = sess.run([accuracy,cross_entropy],feed_dict=train_data)\n",
118 | " test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
119 | " va,vc,summary_test = sess.run([accuracy,cross_entropy,summary_op],feed_dict=test_data)\n",
120 | " test_summary_writer.add_summary(summary_test,i)\n",
121 | " print(\"Step : %d Batch : acc = %.4f loss = %.4f | Test acc = %.4f loss = %.4f\" % (i,ta,tc,va,vc))\n",
122 | " \n",
123 | " #--- edit\n",
124 | "#success on test data?\n",
125 | "test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
126 | "a,c = sess.run([accuracy, cross_entropy], feed_dict=test_data)\n",
127 | "print(\"Test data acc = %.4f loss = %.4f\" % (a,c))"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": null,
133 | "metadata": {
134 | "collapsed": false
135 | },
136 | "outputs": [],
137 | "source": [
138 | "print(\"Test image size\")\n",
139 | "print(mnist.test.images.shape)\n",
140 | "\n",
141 | "im_test = mnist.test.images[0].reshape([28,28])\n",
142 | "plt.imshow(im_test, cmap= cm.Greys)\n",
143 | "plt.show()\n",
144 | "#feed test again. By interactive sess we can use eval without sess !! easy!?\n",
145 | "res = Y.eval(feed_dict = test_data)\n",
146 | "print(\"Result size : \")\n",
147 | "print(res.shape)\n",
148 | "print(\"Picking up first response \")\n",
149 | "print(res[0])\n",
150 | "print(\"Softmax percentage : \")\n",
151 | "print(tf.nn.softmax(res[0]).eval())\n",
152 | "print(\"Result are : %d\" % np.argmax(res[0]))"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "metadata": {
159 | "collapsed": false
160 | },
161 | "outputs": [],
162 | "source": [
163 | "wts = W1.eval()\n",
164 | "wts.shape\n",
165 | "for i in range(0,10):\n",
166 | " im = wts.flatten()[i::512].reshape((28,-1))\n",
167 | " plt.imshow(im, cmap = cm.Greys)\n",
168 | " plt.show()"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "metadata": {
175 | "collapsed": false
176 | },
177 | "outputs": [],
178 | "source": [
179 | "wts = W2.eval()\n",
180 | "im = wts.flatten()[i::10].reshape([16,32])\n",
181 | "plt.imshow(im, cmap = cm.Greys)\n",
182 | "plt.show()"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "metadata": {
189 | "collapsed": true
190 | },
191 | "outputs": [],
192 | "source": []
193 | }
194 | ],
195 | "metadata": {
196 | "kernelspec": {
197 | "display_name": "Tensorflow 3",
198 | "language": "python",
199 | "name": "tensorflow"
200 | },
201 | "language_info": {
202 | "codemirror_mode": {
203 | "name": "ipython",
204 | "version": 3
205 | },
206 | "file_extension": ".py",
207 | "mimetype": "text/x-python",
208 | "name": "python",
209 | "nbconvert_exporter": "python",
210 | "pygments_lexer": "ipython3",
211 | "version": "3.5.3"
212 | }
213 | },
214 | "nbformat": 4,
215 | "nbformat_minor": 2
216 | }
217 |
--------------------------------------------------------------------------------
/Lab06_Deep_Learning/.ipynb_checkpoints/Lab06_Deep_Learning-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import\n",
12 | "from __future__ import division\n",
13 | "from __future__ import print_function\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "import matplotlib.cm as cm\n",
16 | "import matplotlib\n",
17 | "\n",
18 | "%matplotlib inline\n",
19 | "\n",
20 | "import tensorflow as tf\n",
21 | "import numpy as np\n",
22 | "from tensorflow.examples.tutorials.mnist import input_data\n",
23 | "mnist = input_data.read_data_sets(\"../data\", one_hot = True)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "collapsed": false
31 | },
32 | "outputs": [],
33 | "source": [
34 | "hidden1 = 200\n",
35 | "hidden2 = 100\n",
36 | "hidden3 = 60\n",
37 | "\n",
38 | "X = tf.placeholder(tf.float32, [None,784])\n",
39 | "#hidden\n",
40 | "W1 = tf.Variable(tf.truncated_normal([784,hidden1],stddev=0.1))\n",
41 | "b1 = tf.Variable(tf.zeros([hidden1]))\n",
42 | "\n",
43 | "W2 = tf.Variable(tf.truncated_normal([hidden1,hidden2],stddev=0.1))\n",
44 | "b2 = tf.Variable(tf.zeros([hidden2]))\n",
45 | "\n",
46 | "W3 = tf.Variable(tf.truncated_normal([hidden2,hidden3],stddev=0.1))\n",
47 | "b3 = tf.Variable(tf.zeros([hidden3]))\n",
48 | "#out layer\n",
49 | "W4 = tf.Variable(tf.truncated_normal([hidden3,10],stddev=0.1))\n",
50 | "b4 = tf.Variable(tf.zeros([10]))"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {
57 | "collapsed": true
58 | },
59 | "outputs": [],
60 | "source": [
61 | "#model\n",
62 | "fc1 = tf.nn.sigmoid(tf.matmul(X, W1) + b1)\n",
63 | "fc2 = tf.nn.sigmoid(tf.matmul(fc1, W2) + b2)\n",
64 | "fc3 = tf.nn.sigmoid(tf.matmul(fc2, W3) + b3)\n",
65 | "Ylogits = tf.matmul(fc3, W4) + b4\n",
66 | "Y = tf.nn.softmax(Ylogits)\n",
67 | "Y_ = tf.placeholder(tf.float32, [None, 10])\n",
68 | "\n",
69 | "#loss function\n",
70 | "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=Ylogits))\n",
71 | "\n",
72 | "# % of correct answers found in batch\n",
73 | "is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))\n",
74 | "accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n",
75 | "\n",
76 | "# Create a summary to monitor cost tensor\n",
77 | "tf.summary.scalar('loss', cross_entropy)\n",
78 | "# Create a summary to monitor accuracy tensor\n",
79 | "tf.summary.scalar('accuracy', accuracy)\n",
80 | "#for weight\n",
81 | "with tf.name_scope('Weights'):\n",
82 | " tf.summary.histogram(\"weight1\", W1)\n",
83 | " tf.summary.histogram(\"weight2\", W2)\n",
84 | " tf.summary.histogram(\"bias_1\", b1)\n",
85 | " tf.summary.histogram(\"bias_2\", b2)\n",
86 | "\n",
87 | "summary_op = tf.summary.merge_all()\n",
88 | "\n",
89 | "#Trainer\n",
90 | "optimizer = tf.train.AdamOptimizer()\n",
91 | "train_step = optimizer.minimize(cross_entropy)"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "metadata": {
98 | "collapsed": false,
99 | "scrolled": false
100 | },
101 | "outputs": [],
102 | "source": [
103 | "#create session\n",
104 | "sess = tf.InteractiveSession()\n",
105 | "tf.global_variables_initializer().run()\n",
106 | "\n",
107 | "#create summary op to write logs to Tensorboard\n",
108 | "train_summary_writer = tf.summary.FileWriter('../logs/deep_sig_train', graph=sess.graph)\n",
109 | "test_summary_writer = tf.summary.FileWriter('../logs/deep_sig_test', graph=sess.graph)\n",
110 | "\n",
111 | "for i in range(10000):\n",
112 | " #load batch of images and correct answers\n",
113 | " batch_X, batch_Y = mnist.train.next_batch(100)\n",
114 | " train_data={X: batch_X, Y_: batch_Y}\n",
115 | " \n",
116 | " #train \n",
117 | " _,summary = sess.run([train_step,summary_op], feed_dict=train_data)\n",
118 | " # Write logs at every iteration\n",
119 | " train_summary_writer.add_summary(summary,i)\n",
120 | " if i % 100 == 0: \n",
121 | " #success ?\n",
122 | " ta,tc = sess.run([accuracy,cross_entropy],feed_dict=train_data)\n",
123 | " test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
124 | " va,vc,summary_test = sess.run([accuracy,cross_entropy,summary_op],feed_dict=test_data)\n",
125 | " test_summary_writer.add_summary(summary_test,i)\n",
126 | " print(\"Step : %d Batch : acc = %.4f loss = %.4f | Test acc = %.4f loss = %.4f\" % (i,ta,tc,va,vc)) \n",
127 | " #--- edit\n",
128 | "#success on test data?\n",
129 | "test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
130 | "a,c = sess.run([accuracy, cross_entropy], feed_dict=test_data)\n",
131 | "print(\"Test data acc = %.4f loss = %.4f\" % (a,c))"
132 | ]
133 | },
134 | {
135 | "cell_type": "markdown",
136 | "metadata": {},
137 | "source": [
138 | "Try adding one more hidden layer with 30 units"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": null,
144 | "metadata": {
145 | "collapsed": true
146 | },
147 | "outputs": [],
148 | "source": []
149 | }
150 | ],
151 | "metadata": {
152 | "kernelspec": {
153 | "display_name": "Tensorflow 3",
154 | "language": "python",
155 | "name": "tensorflow"
156 | },
157 | "language_info": {
158 | "codemirror_mode": {
159 | "name": "ipython",
160 | "version": 3
161 | },
162 | "file_extension": ".py",
163 | "mimetype": "text/x-python",
164 | "name": "python",
165 | "nbconvert_exporter": "python",
166 | "pygments_lexer": "ipython3",
167 | "version": "3.5.3"
168 | }
169 | },
170 | "nbformat": 4,
171 | "nbformat_minor": 2
172 | }
173 |
--------------------------------------------------------------------------------
/Lab06_Deep_Learning/Lab06_Deep_Learning.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import\n",
12 | "from __future__ import division\n",
13 | "from __future__ import print_function\n",
14 | "import matplotlib.pyplot as plt\n",
15 | "import matplotlib.cm as cm\n",
16 | "import matplotlib\n",
17 | "\n",
18 | "%matplotlib inline\n",
19 | "\n",
20 | "import tensorflow as tf\n",
21 | "import numpy as np\n",
22 | "from tensorflow.examples.tutorials.mnist import input_data\n",
23 | "mnist = input_data.read_data_sets(\"../data\", one_hot = True)"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "collapsed": false
31 | },
32 | "outputs": [],
33 | "source": [
34 | "hidden1 = 200\n",
35 | "hidden2 = 100\n",
36 | "hidden3 = 60\n",
37 | "\n",
38 | "X = tf.placeholder(tf.float32, [None,784])\n",
39 | "#hidden\n",
40 | "W1 = tf.Variable(tf.truncated_normal([784,hidden1],stddev=0.1))\n",
41 | "b1 = tf.Variable(tf.zeros([hidden1]))\n",
42 | "\n",
43 | "W2 = tf.Variable(tf.truncated_normal([hidden1,hidden2],stddev=0.1))\n",
44 | "b2 = tf.Variable(tf.zeros([hidden2]))\n",
45 | "\n",
46 | "W3 = tf.Variable(tf.truncated_normal([hidden2,hidden3],stddev=0.1))\n",
47 | "b3 = tf.Variable(tf.zeros([hidden3]))\n",
48 | "#out layer\n",
49 | "W4 = tf.Variable(tf.truncated_normal([hidden3,10],stddev=0.1))\n",
50 | "b4 = tf.Variable(tf.zeros([10]))"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {
57 | "collapsed": true
58 | },
59 | "outputs": [],
60 | "source": [
61 | "#model\n",
62 | "fc1 = tf.nn.sigmoid(tf.matmul(X, W1) + b1)\n",
63 | "fc2 = tf.nn.sigmoid(tf.matmul(fc1, W2) + b2)\n",
64 | "fc3 = tf.nn.sigmoid(tf.matmul(fc2, W3) + b3)\n",
65 | "Ylogits = tf.matmul(fc3, W4) + b4\n",
66 | "Y = tf.nn.softmax(Ylogits)\n",
67 | "Y_ = tf.placeholder(tf.float32, [None, 10])\n",
68 | "\n",
69 | "#loss function\n",
70 | "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=Ylogits))\n",
71 | "\n",
72 | "# % of correct answers found in batch\n",
73 | "is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))\n",
74 | "accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n",
75 | "\n",
76 | "# Create a summary to monitor cost tensor\n",
77 | "tf.summary.scalar('loss', cross_entropy)\n",
78 | "# Create a summary to monitor accuracy tensor\n",
79 | "tf.summary.scalar('accuracy', accuracy)\n",
80 | "#for weight\n",
81 | "with tf.name_scope('Weights'):\n",
82 | " tf.summary.histogram(\"weight1\", W1)\n",
83 | " tf.summary.histogram(\"weight2\", W2)\n",
84 | " tf.summary.histogram(\"bias_1\", b1)\n",
85 | " tf.summary.histogram(\"bias_2\", b2)\n",
86 | "\n",
87 | "summary_op = tf.summary.merge_all()\n",
88 | "\n",
89 | "#Trainer\n",
90 | "optimizer = tf.train.AdamOptimizer()\n",
91 | "train_step = optimizer.minimize(cross_entropy)"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "metadata": {
98 | "collapsed": false,
99 | "scrolled": false
100 | },
101 | "outputs": [],
102 | "source": [
103 | "#create session\n",
104 | "sess = tf.InteractiveSession()\n",
105 | "tf.global_variables_initializer().run()\n",
106 | "\n",
107 | "#create summary op to write logs to Tensorboard\n",
108 | "train_summary_writer = tf.summary.FileWriter('../logs/deep_sig_train', graph=sess.graph)\n",
109 | "test_summary_writer = tf.summary.FileWriter('../logs/deep_sig_test', graph=sess.graph)\n",
110 | "\n",
111 | "for i in range(10000):\n",
112 | " #load batch of images and correct answers\n",
113 | " batch_X, batch_Y = mnist.train.next_batch(100)\n",
114 | " train_data={X: batch_X, Y_: batch_Y}\n",
115 | " \n",
116 | " #train \n",
117 | " _,summary = sess.run([train_step,summary_op], feed_dict=train_data)\n",
118 | " # Write logs at every iteration\n",
119 | " train_summary_writer.add_summary(summary,i)\n",
120 | " if i % 100 == 0: \n",
121 | " #success ?\n",
122 | " ta,tc = sess.run([accuracy,cross_entropy],feed_dict=train_data)\n",
123 | " test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
124 | " va,vc,summary_test = sess.run([accuracy,cross_entropy,summary_op],feed_dict=test_data)\n",
125 | " test_summary_writer.add_summary(summary_test,i)\n",
126 | " print(\"Step : %d Batch : acc = %.4f loss = %.4f | Test acc = %.4f loss = %.4f\" % (i,ta,tc,va,vc)) \n",
127 | " #--- edit\n",
128 | "#success on test data?\n",
129 | "test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
130 | "a,c = sess.run([accuracy, cross_entropy], feed_dict=test_data)\n",
131 | "print(\"Test data acc = %.4f loss = %.4f\" % (a,c))"
132 | ]
133 | },
134 | {
135 | "cell_type": "markdown",
136 | "metadata": {},
137 | "source": [
138 | "Try adding one more hidden layer with 30 units"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": null,
144 | "metadata": {
145 | "collapsed": true
146 | },
147 | "outputs": [],
148 | "source": []
149 | }
150 | ],
151 | "metadata": {
152 | "kernelspec": {
153 | "display_name": "Tensorflow 3",
154 | "language": "python",
155 | "name": "tensorflow"
156 | },
157 | "language_info": {
158 | "codemirror_mode": {
159 | "name": "ipython",
160 | "version": 3
161 | },
162 | "file_extension": ".py",
163 | "mimetype": "text/x-python",
164 | "name": "python",
165 | "nbconvert_exporter": "python",
166 | "pygments_lexer": "ipython3",
167 | "version": "3.5.3"
168 | }
169 | },
170 | "nbformat": 4,
171 | "nbformat_minor": 2
172 | }
173 |
--------------------------------------------------------------------------------
/Lab07_Deep_Learning_Ext/Lab07_Deep_Learning_Ext.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [
10 | {
11 | "name": "stdout",
12 | "output_type": "stream",
13 | "text": [
14 | "Extracting data/train-images-idx3-ubyte.gz\n",
15 | "Extracting data/train-labels-idx1-ubyte.gz\n",
16 | "Extracting data/t10k-images-idx3-ubyte.gz\n",
17 | "Extracting data/t10k-labels-idx1-ubyte.gz\n"
18 | ]
19 | }
20 | ],
21 | "source": [
22 | "from __future__ import absolute_import\n",
23 | "from __future__ import division\n",
24 | "from __future__ import print_function\n",
25 | "import matplotlib.pyplot as plt\n",
26 | "import matplotlib.cm as cm\n",
27 | "import matplotlib\n",
28 | "\n",
29 | "%matplotlib inline\n",
30 | "\n",
31 | "import tensorflow as tf\n",
32 | "import numpy as np\n",
33 | "from tensorflow.examples.tutorials.mnist import input_data\n",
34 | "mnist = input_data.read_data_sets(\"data/\", one_hot = True)"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 2,
40 | "metadata": {
41 | "collapsed": true
42 | },
43 | "outputs": [],
44 | "source": [
45 | "hidden1 = 200\n",
46 | "hidden2 = 100\n",
47 | "hidden3 = 60\n",
48 | "hidden4 = 30\n",
49 | "\n",
50 | "X = tf.placeholder(tf.float32, [None,784])\n",
51 | "#hidden\n",
52 | "W1 = tf.Variable(tf.truncated_normal([784,hidden1],stddev=0.1))\n",
53 | "b1 = tf.Variable(tf.ones([hidden1])/10)\n",
54 | "\n",
55 | "W2 = tf.Variable(tf.truncated_normal([hidden1,hidden2],stddev=0.1))\n",
56 | "b2 = tf.Variable(tf.ones([hidden2])/10)\n",
57 | "\n",
58 | "W3 = tf.Variable(tf.truncated_normal([hidden2,hidden3],stddev=0.1))\n",
59 | "b3 = tf.Variable(tf.ones([hidden3])/10)\n",
60 | "\n",
61 | "W4 = tf.Variable(tf.truncated_normal([hidden3,hidden4],stddev=0.1))\n",
62 | "b4 = tf.Variable(tf.ones([hidden4])/10)\n",
63 | "#out layer\n",
64 | "W5 = tf.Variable(tf.truncated_normal([hidden4,10],stddev=0.1))\n",
65 | "b5 = tf.Variable(tf.zeros([10]))"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 3,
71 | "metadata": {
72 | "collapsed": false
73 | },
74 | "outputs": [],
75 | "source": [
76 | "#model\n",
77 | "fc1 = tf.nn.relu(tf.matmul(X, W1) + b1)\n",
78 | "fc2 = tf.nn.relu(tf.matmul(fc1, W2) + b2)\n",
79 | "fc3 = tf.nn.relu(tf.matmul(fc2, W3) + b3)\n",
80 | "fc4 = tf.nn.relu(tf.matmul(fc3, W4) + b4)\n",
81 | "Ylogits = tf.matmul(fc4, W5) + b5\n",
82 | "Y = tf.nn.softmax(Ylogits)\n",
83 | "Y_ = tf.placeholder(tf.float32, [None, 10])\n",
84 | "\n",
85 | "#loss function\n",
86 | "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=Ylogits))\n",
87 | "\n",
88 | "# % of correct answers found in batch\n",
89 | "is_correct = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))\n",
90 | "accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\n",
91 | "\n",
92 | "# Create a summary to monitor cost tensor\n",
93 | "tf.summary.scalar('loss', cross_entropy)\n",
94 | "# Create a summary to monitor accuracy tensor\n",
95 | "tf.summary.scalar('accuracy', accuracy)\n",
96 | "#for weight\n",
97 | "with tf.name_scope('Weights'):\n",
98 | " tf.summary.histogram(\"weight1\", W1)\n",
99 | " tf.summary.histogram(\"weight2\", W2)\n",
100 | " tf.summary.histogram(\"bias_1\", b1)\n",
101 | " tf.summary.histogram(\"bias_2\", b2)\n",
102 | "\n",
103 | "summary_op = tf.summary.merge_all()\n",
104 | "\n",
105 | "#Trainer\n",
106 | "optimizer = tf.train.AdamOptimizer()\n",
107 | "train_step = optimizer.minimize(cross_entropy)"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": 4,
113 | "metadata": {
114 | "collapsed": false,
115 | "scrolled": false
116 | },
117 | "outputs": [
118 | {
119 | "name": "stdout",
120 | "output_type": "stream",
121 | "text": [
122 | "Step : 0 Batch : acc = 0.1200 loss = 2.2663 | Test acc = 0.1134 loss = 2.2903\n",
123 | "Step : 100 Batch : acc = 0.8900 loss = 0.3858 | Test acc = 0.8660 loss = 0.4556\n",
124 | "Step : 200 Batch : acc = 0.9200 loss = 0.2332 | Test acc = 0.9177 loss = 0.2926\n",
125 | "Step : 300 Batch : acc = 0.9600 loss = 0.1652 | Test acc = 0.9317 loss = 0.2377\n",
126 | "Step : 400 Batch : acc = 0.9200 loss = 0.2359 | Test acc = 0.9350 loss = 0.2143\n",
127 | "Step : 500 Batch : acc = 0.9600 loss = 0.0932 | Test acc = 0.9455 loss = 0.1859\n",
128 | "Step : 600 Batch : acc = 0.9500 loss = 0.1298 | Test acc = 0.9479 loss = 0.1706\n",
129 | "Step : 700 Batch : acc = 0.9800 loss = 0.0562 | Test acc = 0.9577 loss = 0.1413\n",
130 | "Step : 800 Batch : acc = 0.9700 loss = 0.1041 | Test acc = 0.9613 loss = 0.1314\n",
131 | "Step : 900 Batch : acc = 0.9800 loss = 0.1236 | Test acc = 0.9637 loss = 0.1238\n",
132 | "Step : 1000 Batch : acc = 0.9400 loss = 0.1003 | Test acc = 0.9636 loss = 0.1225\n",
133 | "Step : 1100 Batch : acc = 0.9800 loss = 0.0614 | Test acc = 0.9588 loss = 0.1324\n",
134 | "Step : 1200 Batch : acc = 0.9600 loss = 0.0949 | Test acc = 0.9658 loss = 0.1105\n",
135 | "Step : 1300 Batch : acc = 0.9600 loss = 0.1036 | Test acc = 0.9691 loss = 0.0990\n",
136 | "Step : 1400 Batch : acc = 0.9900 loss = 0.0470 | Test acc = 0.9681 loss = 0.1020\n",
137 | "Step : 1500 Batch : acc = 0.9600 loss = 0.1314 | Test acc = 0.9653 loss = 0.1112\n",
138 | "Step : 1600 Batch : acc = 1.0000 loss = 0.0317 | Test acc = 0.9703 loss = 0.0981\n",
139 | "Step : 1700 Batch : acc = 0.9600 loss = 0.0744 | Test acc = 0.9711 loss = 0.0925\n",
140 | "Step : 1800 Batch : acc = 1.0000 loss = 0.0220 | Test acc = 0.9700 loss = 0.0988\n",
141 | "Step : 1900 Batch : acc = 0.9900 loss = 0.0801 | Test acc = 0.9694 loss = 0.0990\n",
142 | "Step : 2000 Batch : acc = 0.9800 loss = 0.0860 | Test acc = 0.9741 loss = 0.0839\n",
143 | "Step : 2100 Batch : acc = 0.9900 loss = 0.0378 | Test acc = 0.9720 loss = 0.0881\n",
144 | "Step : 2200 Batch : acc = 0.9700 loss = 0.0710 | Test acc = 0.9736 loss = 0.0903\n",
145 | "Step : 2300 Batch : acc = 0.9900 loss = 0.0332 | Test acc = 0.9713 loss = 0.0920\n",
146 | "Step : 2400 Batch : acc = 0.9600 loss = 0.0953 | Test acc = 0.9741 loss = 0.0835\n",
147 | "Step : 2500 Batch : acc = 0.9800 loss = 0.0674 | Test acc = 0.9738 loss = 0.0834\n",
148 | "Step : 2600 Batch : acc = 0.9900 loss = 0.0184 | Test acc = 0.9763 loss = 0.0778\n",
149 | "Step : 2700 Batch : acc = 1.0000 loss = 0.0115 | Test acc = 0.9736 loss = 0.0843\n",
150 | "Step : 2800 Batch : acc = 1.0000 loss = 0.0029 | Test acc = 0.9725 loss = 0.0905\n",
151 | "Step : 2900 Batch : acc = 1.0000 loss = 0.0109 | Test acc = 0.9739 loss = 0.0857\n",
152 | "Step : 3000 Batch : acc = 1.0000 loss = 0.0092 | Test acc = 0.9733 loss = 0.0912\n",
153 | "Step : 3100 Batch : acc = 0.9900 loss = 0.0407 | Test acc = 0.9767 loss = 0.0783\n",
154 | "Step : 3200 Batch : acc = 1.0000 loss = 0.0113 | Test acc = 0.9684 loss = 0.1038\n",
155 | "Step : 3300 Batch : acc = 1.0000 loss = 0.0090 | Test acc = 0.9750 loss = 0.0846\n",
156 | "Step : 3400 Batch : acc = 0.9900 loss = 0.0328 | Test acc = 0.9738 loss = 0.0936\n",
157 | "Step : 3500 Batch : acc = 1.0000 loss = 0.0121 | Test acc = 0.9758 loss = 0.0845\n",
158 | "Step : 3600 Batch : acc = 0.9900 loss = 0.0289 | Test acc = 0.9702 loss = 0.1000\n",
159 | "Step : 3700 Batch : acc = 0.9900 loss = 0.0436 | Test acc = 0.9742 loss = 0.0978\n",
160 | "Step : 3800 Batch : acc = 1.0000 loss = 0.0071 | Test acc = 0.9769 loss = 0.0824\n",
161 | "Step : 3900 Batch : acc = 1.0000 loss = 0.0148 | Test acc = 0.9746 loss = 0.0892\n",
162 | "Step : 4000 Batch : acc = 1.0000 loss = 0.0059 | Test acc = 0.9762 loss = 0.0855\n",
163 | "Step : 4100 Batch : acc = 1.0000 loss = 0.0051 | Test acc = 0.9736 loss = 0.0951\n",
164 | "Step : 4200 Batch : acc = 1.0000 loss = 0.0065 | Test acc = 0.9743 loss = 0.0898\n",
165 | "Step : 4300 Batch : acc = 1.0000 loss = 0.0159 | Test acc = 0.9749 loss = 0.0878\n",
166 | "Step : 4400 Batch : acc = 1.0000 loss = 0.0057 | Test acc = 0.9732 loss = 0.0897\n",
167 | "Step : 4500 Batch : acc = 0.9900 loss = 0.0220 | Test acc = 0.9772 loss = 0.0866\n",
168 | "Step : 4600 Batch : acc = 1.0000 loss = 0.0025 | Test acc = 0.9761 loss = 0.0861\n",
169 | "Step : 4700 Batch : acc = 1.0000 loss = 0.0031 | Test acc = 0.9759 loss = 0.0909\n",
170 | "Step : 4800 Batch : acc = 0.9900 loss = 0.0219 | Test acc = 0.9732 loss = 0.1011\n",
171 | "Step : 4900 Batch : acc = 1.0000 loss = 0.0100 | Test acc = 0.9745 loss = 0.0948\n",
172 | "Step : 5000 Batch : acc = 1.0000 loss = 0.0140 | Test acc = 0.9720 loss = 0.1160\n",
173 | "Step : 5100 Batch : acc = 1.0000 loss = 0.0057 | Test acc = 0.9775 loss = 0.0887\n",
174 | "Step : 5200 Batch : acc = 1.0000 loss = 0.0015 | Test acc = 0.9752 loss = 0.0980\n",
175 | "Step : 5300 Batch : acc = 1.0000 loss = 0.0087 | Test acc = 0.9747 loss = 0.1045\n",
176 | "Step : 5400 Batch : acc = 0.9900 loss = 0.0224 | Test acc = 0.9770 loss = 0.0992\n",
177 | "Step : 5500 Batch : acc = 1.0000 loss = 0.0028 | Test acc = 0.9731 loss = 0.1054\n",
178 | "Step : 5600 Batch : acc = 1.0000 loss = 0.0103 | Test acc = 0.9778 loss = 0.0878\n",
179 | "Step : 5700 Batch : acc = 0.9900 loss = 0.0273 | Test acc = 0.9775 loss = 0.0942\n",
180 | "Step : 5800 Batch : acc = 0.9900 loss = 0.0292 | Test acc = 0.9752 loss = 0.0999\n",
181 | "Step : 5900 Batch : acc = 0.9900 loss = 0.0236 | Test acc = 0.9776 loss = 0.0980\n",
182 | "Step : 6000 Batch : acc = 1.0000 loss = 0.0009 | Test acc = 0.9771 loss = 0.0925\n",
183 | "Step : 6100 Batch : acc = 1.0000 loss = 0.0054 | Test acc = 0.9770 loss = 0.0930\n",
184 | "Step : 6200 Batch : acc = 1.0000 loss = 0.0073 | Test acc = 0.9786 loss = 0.0916\n",
185 | "Step : 6300 Batch : acc = 1.0000 loss = 0.0019 | Test acc = 0.9753 loss = 0.1012\n",
186 | "Step : 6400 Batch : acc = 1.0000 loss = 0.0007 | Test acc = 0.9780 loss = 0.0923\n",
187 | "Step : 6500 Batch : acc = 1.0000 loss = 0.0037 | Test acc = 0.9753 loss = 0.1007\n",
188 | "Step : 6600 Batch : acc = 1.0000 loss = 0.0020 | Test acc = 0.9757 loss = 0.0957\n",
189 | "Step : 6700 Batch : acc = 1.0000 loss = 0.0020 | Test acc = 0.9774 loss = 0.0933\n",
190 | "Step : 6800 Batch : acc = 1.0000 loss = 0.0017 | Test acc = 0.9766 loss = 0.1091\n",
191 | "Step : 6900 Batch : acc = 1.0000 loss = 0.0034 | Test acc = 0.9738 loss = 0.1092\n",
192 | "Step : 7000 Batch : acc = 1.0000 loss = 0.0051 | Test acc = 0.9741 loss = 0.1094\n",
193 | "Step : 7100 Batch : acc = 0.9900 loss = 0.0328 | Test acc = 0.9769 loss = 0.1043\n",
194 | "Step : 7200 Batch : acc = 1.0000 loss = 0.0015 | Test acc = 0.9757 loss = 0.1051\n",
195 | "Step : 7300 Batch : acc = 1.0000 loss = 0.0022 | Test acc = 0.9769 loss = 0.1040\n",
196 | "Step : 7400 Batch : acc = 1.0000 loss = 0.0060 | Test acc = 0.9734 loss = 0.1121\n",
197 | "Step : 7500 Batch : acc = 1.0000 loss = 0.0046 | Test acc = 0.9752 loss = 0.1112\n",
198 | "Step : 7600 Batch : acc = 1.0000 loss = 0.0034 | Test acc = 0.9763 loss = 0.1033\n",
199 | "Step : 7700 Batch : acc = 1.0000 loss = 0.0019 | Test acc = 0.9784 loss = 0.0853\n",
200 | "Step : 7800 Batch : acc = 0.9900 loss = 0.0135 | Test acc = 0.9779 loss = 0.1022\n",
201 | "Step : 7900 Batch : acc = 1.0000 loss = 0.0017 | Test acc = 0.9766 loss = 0.1171\n",
202 | "Step : 8000 Batch : acc = 1.0000 loss = 0.0005 | Test acc = 0.9732 loss = 0.1268\n",
203 | "Step : 8100 Batch : acc = 1.0000 loss = 0.0049 | Test acc = 0.9790 loss = 0.1031\n",
204 | "Step : 8200 Batch : acc = 1.0000 loss = 0.0042 | Test acc = 0.9770 loss = 0.1015\n",
205 | "Step : 8300 Batch : acc = 1.0000 loss = 0.0041 | Test acc = 0.9722 loss = 0.1257\n",
206 | "Step : 8400 Batch : acc = 1.0000 loss = 0.0017 | Test acc = 0.9771 loss = 0.1046\n",
207 | "Step : 8500 Batch : acc = 1.0000 loss = 0.0008 | Test acc = 0.9750 loss = 0.1208\n",
208 | "Step : 8600 Batch : acc = 0.9900 loss = 0.0347 | Test acc = 0.9782 loss = 0.1016\n",
209 | "Step : 8700 Batch : acc = 0.9900 loss = 0.0080 | Test acc = 0.9774 loss = 0.1089\n",
210 | "Step : 8800 Batch : acc = 1.0000 loss = 0.0010 | Test acc = 0.9736 loss = 0.1334\n",
211 | "Step : 8900 Batch : acc = 1.0000 loss = 0.0026 | Test acc = 0.9775 loss = 0.1012\n",
212 | "Step : 9000 Batch : acc = 1.0000 loss = 0.0006 | Test acc = 0.9776 loss = 0.1068\n",
213 | "Step : 9100 Batch : acc = 1.0000 loss = 0.0021 | Test acc = 0.9772 loss = 0.1017\n",
214 | "Step : 9200 Batch : acc = 1.0000 loss = 0.0008 | Test acc = 0.9780 loss = 0.1025\n",
215 | "Step : 9300 Batch : acc = 1.0000 loss = 0.0039 | Test acc = 0.9765 loss = 0.1049\n",
216 | "Step : 9400 Batch : acc = 1.0000 loss = 0.0016 | Test acc = 0.9777 loss = 0.1046\n",
217 | "Step : 9500 Batch : acc = 1.0000 loss = 0.0004 | Test acc = 0.9777 loss = 0.1020\n",
218 | "Step : 9600 Batch : acc = 1.0000 loss = 0.0017 | Test acc = 0.9762 loss = 0.1246\n",
219 | "Step : 9700 Batch : acc = 1.0000 loss = 0.0016 | Test acc = 0.9761 loss = 0.1215\n",
220 | "Step : 9800 Batch : acc = 1.0000 loss = 0.0035 | Test acc = 0.9759 loss = 0.1149\n",
221 | "Step : 9900 Batch : acc = 1.0000 loss = 0.0019 | Test acc = 0.9768 loss = 0.1074\n",
222 | "Step : 10000 Batch : acc = 1.0000 loss = 0.0022 | Test acc = 0.9805 loss = 0.1010\n",
223 | "Step : 10100 Batch : acc = 1.0000 loss = 0.0013 | Test acc = 0.9766 loss = 0.1182\n",
224 | "Step : 10200 Batch : acc = 1.0000 loss = 0.0028 | Test acc = 0.9748 loss = 0.1300\n",
225 | "Step : 10300 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9751 loss = 0.1250\n",
226 | "Step : 10400 Batch : acc = 1.0000 loss = 0.0040 | Test acc = 0.9774 loss = 0.1176\n",
227 | "Step : 10500 Batch : acc = 1.0000 loss = 0.0014 | Test acc = 0.9754 loss = 0.1316\n",
228 | "Step : 10600 Batch : acc = 1.0000 loss = 0.0031 | Test acc = 0.9775 loss = 0.1183\n",
229 | "Step : 10700 Batch : acc = 1.0000 loss = 0.0049 | Test acc = 0.9765 loss = 0.1243\n",
230 | "Step : 10800 Batch : acc = 1.0000 loss = 0.0063 | Test acc = 0.9719 loss = 0.1286\n",
231 | "Step : 10900 Batch : acc = 1.0000 loss = 0.0006 | Test acc = 0.9760 loss = 0.1078\n",
232 | "Step : 11000 Batch : acc = 1.0000 loss = 0.0007 | Test acc = 0.9790 loss = 0.0957\n",
233 | "Step : 11100 Batch : acc = 1.0000 loss = 0.0018 | Test acc = 0.9779 loss = 0.1086\n",
234 | "Step : 11200 Batch : acc = 1.0000 loss = 0.0005 | Test acc = 0.9798 loss = 0.1010\n",
235 | "Step : 11300 Batch : acc = 1.0000 loss = 0.0006 | Test acc = 0.9798 loss = 0.1007\n",
236 | "Step : 11400 Batch : acc = 1.0000 loss = 0.0009 | Test acc = 0.9762 loss = 0.1191\n",
237 | "Step : 11500 Batch : acc = 1.0000 loss = 0.0003 | Test acc = 0.9798 loss = 0.1023\n",
238 | "Step : 11600 Batch : acc = 1.0000 loss = 0.0022 | Test acc = 0.9657 loss = 0.1712\n",
239 | "Step : 11700 Batch : acc = 1.0000 loss = 0.0013 | Test acc = 0.9791 loss = 0.1114\n",
240 | "Step : 11800 Batch : acc = 1.0000 loss = 0.0023 | Test acc = 0.9770 loss = 0.1219\n",
241 | "Step : 11900 Batch : acc = 1.0000 loss = 0.0000 | Test acc = 0.9781 loss = 0.1230\n",
242 | "Step : 12000 Batch : acc = 1.0000 loss = 0.0012 | Test acc = 0.9765 loss = 0.1345\n",
243 | "Step : 12100 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9778 loss = 0.1105\n",
244 | "Step : 12200 Batch : acc = 1.0000 loss = 0.0006 | Test acc = 0.9811 loss = 0.1131\n",
245 | "Step : 12300 Batch : acc = 1.0000 loss = 0.0007 | Test acc = 0.9770 loss = 0.1272\n",
246 | "Step : 12400 Batch : acc = 1.0000 loss = 0.0010 | Test acc = 0.9796 loss = 0.1110\n",
247 | "Step : 12500 Batch : acc = 1.0000 loss = 0.0016 | Test acc = 0.9797 loss = 0.1122\n",
248 | "Step : 12600 Batch : acc = 0.9900 loss = 0.0115 | Test acc = 0.9788 loss = 0.1191\n",
249 | "Step : 12700 Batch : acc = 1.0000 loss = 0.0024 | Test acc = 0.9763 loss = 0.1340\n",
250 | "Step : 12800 Batch : acc = 1.0000 loss = 0.0005 | Test acc = 0.9790 loss = 0.1163\n",
251 | "Step : 12900 Batch : acc = 1.0000 loss = 0.0019 | Test acc = 0.9775 loss = 0.1235\n",
252 | "Step : 13000 Batch : acc = 0.9900 loss = 0.0472 | Test acc = 0.9797 loss = 0.1081\n",
253 | "Step : 13100 Batch : acc = 1.0000 loss = 0.0006 | Test acc = 0.9801 loss = 0.1101\n",
254 | "Step : 13200 Batch : acc = 1.0000 loss = 0.0014 | Test acc = 0.9748 loss = 0.1416\n",
255 | "Step : 13300 Batch : acc = 1.0000 loss = 0.0017 | Test acc = 0.9788 loss = 0.1145\n",
256 | "Step : 13400 Batch : acc = 1.0000 loss = 0.0025 | Test acc = 0.9787 loss = 0.1164\n",
257 | "Step : 13500 Batch : acc = 1.0000 loss = 0.0003 | Test acc = 0.9788 loss = 0.1297\n",
258 | "Step : 13600 Batch : acc = 1.0000 loss = 0.0018 | Test acc = 0.9757 loss = 0.1316\n",
259 | "Step : 13700 Batch : acc = 1.0000 loss = 0.0009 | Test acc = 0.9784 loss = 0.1140\n",
260 | "Step : 13800 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9807 loss = 0.1036\n",
261 | "Step : 13900 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9789 loss = 0.1197\n",
262 | "Step : 14000 Batch : acc = 1.0000 loss = 0.0001 | Test acc = 0.9787 loss = 0.1183\n",
263 | "Step : 14100 Batch : acc = 1.0000 loss = 0.0021 | Test acc = 0.9775 loss = 0.1286\n",
264 | "Step : 14200 Batch : acc = 1.0000 loss = 0.0010 | Test acc = 0.9789 loss = 0.1160\n",
265 | "Step : 14300 Batch : acc = 1.0000 loss = 0.0006 | Test acc = 0.9772 loss = 0.1371\n",
266 | "Step : 14400 Batch : acc = 1.0000 loss = 0.0003 | Test acc = 0.9789 loss = 0.1181\n",
267 | "Step : 14500 Batch : acc = 1.0000 loss = 0.0000 | Test acc = 0.9793 loss = 0.1244\n",
268 | "Step : 14600 Batch : acc = 0.9900 loss = 0.0508 | Test acc = 0.9787 loss = 0.1215\n",
269 | "Step : 14700 Batch : acc = 1.0000 loss = 0.0013 | Test acc = 0.9747 loss = 0.1399\n",
270 | "Step : 14800 Batch : acc = 1.0000 loss = 0.0039 | Test acc = 0.9762 loss = 0.1332\n",
271 | "Step : 14900 Batch : acc = 1.0000 loss = 0.0008 | Test acc = 0.9774 loss = 0.1353\n",
272 | "Step : 15000 Batch : acc = 1.0000 loss = 0.0016 | Test acc = 0.9797 loss = 0.1213\n",
273 | "Step : 15100 Batch : acc = 1.0000 loss = 0.0000 | Test acc = 0.9772 loss = 0.1345\n",
274 | "Step : 15200 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9760 loss = 0.1389\n",
275 | "Step : 15300 Batch : acc = 1.0000 loss = 0.0018 | Test acc = 0.9766 loss = 0.1290\n",
276 | "Step : 15400 Batch : acc = 1.0000 loss = 0.0003 | Test acc = 0.9768 loss = 0.1259\n",
277 | "Step : 15500 Batch : acc = 1.0000 loss = 0.0033 | Test acc = 0.9783 loss = 0.1265\n",
278 | "Step : 15600 Batch : acc = 1.0000 loss = 0.0028 | Test acc = 0.9774 loss = 0.1255\n",
279 | "Step : 15700 Batch : acc = 1.0000 loss = 0.0012 | Test acc = 0.9760 loss = 0.1233\n",
280 | "Step : 15800 Batch : acc = 1.0000 loss = 0.0025 | Test acc = 0.9758 loss = 0.1282\n",
281 | "Step : 15900 Batch : acc = 1.0000 loss = 0.0001 | Test acc = 0.9792 loss = 0.1203\n",
282 | "Step : 16000 Batch : acc = 1.0000 loss = 0.0004 | Test acc = 0.9789 loss = 0.1140\n",
283 | "Step : 16100 Batch : acc = 1.0000 loss = 0.0012 | Test acc = 0.9775 loss = 0.1338\n",
284 | "Step : 16200 Batch : acc = 1.0000 loss = 0.0004 | Test acc = 0.9783 loss = 0.1235\n",
285 | "Step : 16300 Batch : acc = 1.0000 loss = 0.0000 | Test acc = 0.9775 loss = 0.1229\n",
286 | "Step : 16400 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9745 loss = 0.1412\n",
287 | "Step : 16500 Batch : acc = 0.9800 loss = 0.0296 | Test acc = 0.9775 loss = 0.1272\n",
288 | "Step : 16600 Batch : acc = 1.0000 loss = 0.0012 | Test acc = 0.9773 loss = 0.1278\n",
289 | "Step : 16700 Batch : acc = 1.0000 loss = 0.0003 | Test acc = 0.9796 loss = 0.1174\n",
290 | "Step : 16800 Batch : acc = 1.0000 loss = 0.0003 | Test acc = 0.9794 loss = 0.1157\n",
291 | "Step : 16900 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9783 loss = 0.1223\n",
292 | "Step : 17000 Batch : acc = 1.0000 loss = 0.0015 | Test acc = 0.9798 loss = 0.1124\n",
293 | "Step : 17100 Batch : acc = 1.0000 loss = 0.0040 | Test acc = 0.9772 loss = 0.1271\n",
294 | "Step : 17200 Batch : acc = 0.9900 loss = 0.0151 | Test acc = 0.9763 loss = 0.1294\n",
295 | "Step : 17300 Batch : acc = 1.0000 loss = 0.0003 | Test acc = 0.9786 loss = 0.1191\n",
296 | "Step : 17400 Batch : acc = 1.0000 loss = 0.0021 | Test acc = 0.9789 loss = 0.1214\n",
297 | "Step : 17500 Batch : acc = 0.9900 loss = 0.0118 | Test acc = 0.9769 loss = 0.1239\n",
298 | "Step : 17600 Batch : acc = 1.0000 loss = 0.0019 | Test acc = 0.9788 loss = 0.1179\n",
299 | "Step : 17700 Batch : acc = 1.0000 loss = 0.0011 | Test acc = 0.9780 loss = 0.1333\n",
300 | "Step : 17800 Batch : acc = 1.0000 loss = 0.0001 | Test acc = 0.9787 loss = 0.1344\n",
301 | "Step : 17900 Batch : acc = 1.0000 loss = 0.0026 | Test acc = 0.9756 loss = 0.1389\n",
302 | "Step : 18000 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9763 loss = 0.1296\n",
303 | "Step : 18100 Batch : acc = 1.0000 loss = 0.0096 | Test acc = 0.9783 loss = 0.1239\n",
304 | "Step : 18200 Batch : acc = 1.0000 loss = 0.0001 | Test acc = 0.9789 loss = 0.1125\n",
305 | "Step : 18300 Batch : acc = 1.0000 loss = 0.0001 | Test acc = 0.9777 loss = 0.1235\n",
306 | "Step : 18400 Batch : acc = 1.0000 loss = 0.0008 | Test acc = 0.9777 loss = 0.1303\n",
307 | "Step : 18500 Batch : acc = 1.0000 loss = 0.0000 | Test acc = 0.9819 loss = 0.1133\n",
308 | "Step : 18600 Batch : acc = 1.0000 loss = 0.0008 | Test acc = 0.9788 loss = 0.1343\n",
309 | "Step : 18700 Batch : acc = 0.9900 loss = 0.0158 | Test acc = 0.9771 loss = 0.1219\n",
310 | "Step : 18800 Batch : acc = 1.0000 loss = 0.0012 | Test acc = 0.9789 loss = 0.1129\n",
311 | "Step : 18900 Batch : acc = 1.0000 loss = 0.0007 | Test acc = 0.9782 loss = 0.1229\n",
312 | "Step : 19000 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9780 loss = 0.1234\n",
313 | "Step : 19100 Batch : acc = 1.0000 loss = 0.0018 | Test acc = 0.9768 loss = 0.1294\n",
314 | "Step : 19200 Batch : acc = 1.0000 loss = 0.0013 | Test acc = 0.9773 loss = 0.1268\n",
315 | "Step : 19300 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9788 loss = 0.1114\n",
316 | "Step : 19400 Batch : acc = 1.0000 loss = 0.0001 | Test acc = 0.9802 loss = 0.1131\n",
317 | "Step : 19500 Batch : acc = 1.0000 loss = 0.0002 | Test acc = 0.9770 loss = 0.1273\n",
318 | "Step : 19600 Batch : acc = 1.0000 loss = 0.0051 | Test acc = 0.9799 loss = 0.1146\n",
319 | "Step : 19700 Batch : acc = 1.0000 loss = 0.0001 | Test acc = 0.9787 loss = 0.1213\n",
320 | "Step : 19800 Batch : acc = 1.0000 loss = 0.0020 | Test acc = 0.9751 loss = 0.1344\n",
321 | "Step : 19900 Batch : acc = 1.0000 loss = 0.0016 | Test acc = 0.9752 loss = 0.1416\n",
322 | "Test data acc = 0.9796 loss = 0.1231\n"
323 | ]
324 | }
325 | ],
326 | "source": [
327 | "#create session\n",
328 | "sess = tf.InteractiveSession()\n",
329 | "tf.global_variables_initializer().run()\n",
330 | "\n",
331 | "#create summary op to write logs to Tensorboard\n",
332 | "train_summary_writer = tf.summary.FileWriter('logs/deep_relu_train', graph=sess.graph)\n",
333 | "test_summary_writer = tf.summary.FileWriter('logs/deep_relu_test', graph=sess.graph)\n",
334 | "\n",
335 | "for i in range(20000):\n",
336 | " #load batch of images and correct answers\n",
337 | " batch_X, batch_Y = mnist.train.next_batch(100)\n",
338 | " train_data={X: batch_X, Y_: batch_Y}\n",
339 | " \n",
340 | " #train \n",
341 | " _,summary = sess.run([train_step,summary_op], feed_dict=train_data)\n",
342 | " # Write logs at every iteration\n",
343 | " train_summary_writer.add_summary(summary,i)\n",
344 | " if i % 100 == 0: \n",
345 | " #success ?\n",
346 | " ta,tc = sess.run([accuracy,cross_entropy],feed_dict=train_data)\n",
347 | " test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
348 | " va,vc,summary_test = sess.run([accuracy,cross_entropy,summary_op],feed_dict=test_data)\n",
349 | " test_summary_writer.add_summary(summary_test,i)\n",
350 | " print(\"Step : %d Batch : acc = %.4f loss = %.4f | Test acc = %.4f loss = %.4f\" % (i,ta,tc,va,vc)) \n",
351 | " \n",
352 | " #--- edit\n",
353 | "#success on test data?\n",
354 | "test_data = {X: mnist.test.images, Y_: mnist.test.labels}\n",
355 | "a,c = sess.run([accuracy, cross_entropy], feed_dict=test_data)\n",
356 | "print(\"Test data acc = %.4f loss = %.4f\" % (a,c))"
357 | ]
358 | },
359 | {
360 | "cell_type": "code",
361 | "execution_count": null,
362 | "metadata": {
363 | "collapsed": true
364 | },
365 | "outputs": [],
366 | "source": []
367 | }
368 | ],
369 | "metadata": {
370 | "kernelspec": {
371 | "display_name": "Tensorflow 3",
372 | "language": "python",
373 | "name": "tensorflow"
374 | },
375 | "language_info": {
376 | "codemirror_mode": {
377 | "name": "ipython",
378 | "version": 3
379 | },
380 | "file_extension": ".py",
381 | "mimetype": "text/x-python",
382 | "name": "python",
383 | "nbconvert_exporter": "python",
384 | "pygments_lexer": "ipython3",
385 | "version": "3.5.3"
386 | }
387 | },
388 | "nbformat": 4,
389 | "nbformat_minor": 2
390 | }
391 |
--------------------------------------------------------------------------------
/Lab08_Convolution_Neural_Network/TFLEARN.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import division, print_function, absolute_import\n",
12 | "\n",
13 | "import tflearn\n",
14 | "from tflearn.layers.core import input_data, dropout, fully_connected\n",
15 | "from tflearn.layers.conv import conv_2d, max_pool_2d\n",
16 | "from tflearn.layers.normalization import local_response_normalization\n",
17 | "from tflearn.layers.estimator import regression\n",
18 | "import os"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {
25 | "collapsed": false
26 | },
27 | "outputs": [],
28 | "source": [
29 | "# Data loading and preprocessing\n",
30 | "import tflearn.datasets.mnist as mnist\n",
31 | "X, Y, testX, testY = mnist.load_data('../data/',one_hot=True)\n",
32 | "X = X.reshape([-1, 28, 28, 1])\n",
33 | "testX = testX.reshape([-1, 28, 28, 1])"
34 | ]
35 | },
36 | {
37 | "cell_type": "code",
38 | "execution_count": null,
39 | "metadata": {
40 | "collapsed": true
41 | },
42 | "outputs": [],
43 | "source": [
44 | "# Building convolutional network\n",
45 | "network = input_data(shape=[None, 28, 28, 1], name='input')\n",
46 | "network = conv_2d(network, 32, 5, activation='relu')\n",
47 | "network = max_pool_2d(network, 2)\n",
48 | "network = local_response_normalization(network)\n",
49 | "network = conv_2d(network, 64, 5, activation='relu')\n",
50 | "network = max_pool_2d(network, 2)\n",
51 | "network = local_response_normalization(network)\n",
52 | "network = fully_connected(network, 128, activation='relu')\n",
53 | "network = dropout(network, 0.8)\n",
54 | "network = fully_connected(network, 256, activation='relu')\n",
55 | "network = dropout(network, 0.8)\n",
56 | "network = fully_connected(network, 10, activation='softmax')\n",
57 | "network = regression(network, optimizer='adam', learning_rate=0.01,\n",
58 | " loss='categorical_crossentropy', name='target')"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "metadata": {
65 | "collapsed": false
66 | },
67 | "outputs": [],
68 | "source": [
69 | "# Training\n",
70 | "if not os.path.isdir('checkpoint'):\n",
71 | " os.mkdir('checkpoint')\n",
72 | "model = tflearn.DNN(network, \n",
73 | " tensorboard_verbose=3,\n",
74 | " tensorboard_dir=\"../logs\",#checkpoint_path='model_checkpoint/checkpoint',\n",
75 | " best_checkpoint_path='checkpoint/')\n",
76 | "model.fit({'input': X}, {'target': Y}, n_epoch=20,\n",
77 | " validation_set=({'input': testX}, {'target': testY}),\n",
78 | " snapshot_step=100, show_metric=True, run_id='convnet_mnist')"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": null,
84 | "metadata": {
85 | "collapsed": true
86 | },
87 | "outputs": [],
88 | "source": []
89 | }
90 | ],
91 | "metadata": {
92 | "kernelspec": {
93 | "display_name": "Tensorflow 3",
94 | "language": "python",
95 | "name": "tensorflow"
96 | },
97 | "language_info": {
98 | "codemirror_mode": {
99 | "name": "ipython",
100 | "version": 3
101 | },
102 | "file_extension": ".py",
103 | "mimetype": "text/x-python",
104 | "name": "python",
105 | "nbconvert_exporter": "python",
106 | "pygments_lexer": "ipython3",
107 | "version": "3.5.3"
108 | }
109 | },
110 | "nbformat": 4,
111 | "nbformat_minor": 2
112 | }
113 |
--------------------------------------------------------------------------------
/Lab08_Convolution_Neural_Network/images/convo1.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab08_Convolution_Neural_Network/images/convo1.mp4
--------------------------------------------------------------------------------
/Lab08_Convolution_Neural_Network/images/convo2.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab08_Convolution_Neural_Network/images/convo2.mp4
--------------------------------------------------------------------------------
/Lab08_Convolution_Neural_Network/images/convo_in_01.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab08_Convolution_Neural_Network/images/convo_in_01.mp4
--------------------------------------------------------------------------------
/Lab08_Convolution_Neural_Network/images/convo_in_02.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab08_Convolution_Neural_Network/images/convo_in_02.mp4
--------------------------------------------------------------------------------
/Lab08_Convolution_Neural_Network/images/convo_in_03.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab08_Convolution_Neural_Network/images/convo_in_03.mp4
--------------------------------------------------------------------------------
/Lab08_Convolution_Neural_Network/images/model.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab08_Convolution_Neural_Network/images/model.jpg
--------------------------------------------------------------------------------
/Lab08_Convolution_Neural_Network/images/model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab08_Convolution_Neural_Network/images/model.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/LAB09_Deep_Q_Learning.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "#!/usr/bin/env python\n",
12 | "from __future__ import print_function\n",
13 | "\n",
14 | "import tensorflow as tf\n",
15 | "import sys\n",
16 | "sys.path.append(\"game/\")\n",
17 | "import wrapped_flappy_bird as game\n",
18 | "import random\n",
19 | "import numpy as np\n",
20 | "from collections import deque\n",
21 | "import matplotlib.pyplot as plt\n",
22 | "import matplotlib.cm as cm\n",
23 | "\n",
24 | "ACTIONS = 2 # number of valid actions\n",
25 | "GAMMA = 0.99 # decay rate of past observations\n",
26 | "OBSERVE = 1000. # timesteps to observe before training\n",
27 | "EXPLORE = 2000000. # frames over which to anneal epsilon\n",
28 | "FINAL_EPSILON = 0.0001 # final value of epsilon\n",
29 | "INITIAL_EPSILON = 0.1 # starting value of epsilon\n",
30 | "REPLAY_MEMORY = 1000 # number of previous transitions to remember\n",
31 | "BATCH = 128 # size of minibatch\n"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": null,
37 | "metadata": {
38 | "collapsed": true
39 | },
40 | "outputs": [],
41 | "source": [
42 | "def weight_variable(shape):\n",
43 | " initial = tf.truncated_normal(shape, stddev = 0.01)\n",
44 | " return tf.Variable(initial)\n",
45 | "\n",
46 | "def bias_variable(shape):\n",
47 | " initial = tf.constant(0.01, shape = shape)\n",
48 | " return tf.Variable(initial)\n",
49 | "\n",
50 | "def conv2d(x, W, stride):\n",
51 | " return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = \"SAME\")\n",
52 | "\n",
53 | "def max_pool_2x2(x):\n",
54 | " return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = \"SAME\")"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": null,
60 | "metadata": {
61 | "collapsed": true
62 | },
63 | "outputs": [],
64 | "source": [
65 | "#create model\n",
66 | "W_conv1 = weight_variable([8, 8, 4, 32])\n",
67 | "b_conv1 = bias_variable([32])\n",
68 | "\n",
69 | "W_conv2 = weight_variable([4, 4, 32, 64])\n",
70 | "b_conv2 = bias_variable([64])\n",
71 | "\n",
72 | "W_conv3 = weight_variable([3, 3, 64, 64])\n",
73 | "b_conv3 = bias_variable([64])\n",
74 | "\n",
75 | "W_fc1 = weight_variable([1600, 512])\n",
76 | "b_fc1 = bias_variable([512])\n",
77 | "\n",
78 | "W_fc2 = weight_variable([512, ACTIONS])\n",
79 | "b_fc2 = bias_variable([ACTIONS])\n",
80 | "\n",
81 | "\n",
82 | "# input layer\n",
83 | "s = tf.placeholder(\"float\", [None, 80, 80, 4])\n",
84 | "\n",
85 | "# hidden layers\n",
86 | "h_conv1 = tf.nn.relu(conv2d(s, W_conv1, 4) + b_conv1) ### ===> 80x80x4 conv 4 ==> 20x20x32\n",
87 | "h_pool1 = max_pool_2x2(h_conv1) ### ===> 20x20x32 maxpool => 10x10x32\n",
88 | "\n",
89 | "h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2) ###=====10x10 conv2 ===>5x5x64\n",
90 | "#h_pool2 = max_pool_2x2(h_conv2)\n",
91 | "\n",
92 | "h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3) ## 5x5 conv 1 padsame =>5x5x64\n",
93 | "#h_pool3 = max_pool_2x2(h_conv3)\n",
94 | "\n",
95 | "#h_pool3_flat = tf.reshape(h_pool3, [-1, 256]) \n",
96 | "h_conv3_flat = tf.reshape(h_conv3, [-1, 1600]) ##5x5x64 flatten =>1600\n",
97 | "\n",
98 | "h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n",
99 | "\n",
100 | "# readout layer\n",
101 | "readout = tf.matmul(h_fc1, W_fc2) + b_fc2"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "metadata": {
108 | "collapsed": true
109 | },
110 | "outputs": [],
111 | "source": [
112 | "# define the cost function\n",
113 | "a = tf.placeholder(\"float\", [None, ACTIONS])\n",
114 | "y = tf.placeholder(\"float\", [None])\n",
115 | "\n",
116 | "readout_action = tf.reduce_sum(tf.multiply(readout, a), reduction_indices=1)\n",
117 | "cost = tf.reduce_mean(tf.square(y - readout_action)) #rms root mean square for cost function\n",
118 | "train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)\n",
119 | "\n",
120 | "# open up a game state to communicate with emulator\n",
121 | "game_state = game.GameState()\n",
122 | "\n",
123 | "# saving and loading networks\n",
124 | "saver = tf.train.Saver()\n",
125 | "checkpoint = tf.train.get_checkpoint_state(\"saved_networks\")\n",
126 | "#if checkpoint and checkpoint.model_checkpoint_path:\n",
127 | "# saver.restore(sess, checkpoint.model_checkpoint_path)\n",
128 | "# print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n",
129 | "#else:\n",
130 | "# print(\"Could not find old network weights\")\n",
131 | "\n",
132 | "# store the previous observations in replay memory\n",
133 | "D = deque()"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {
140 | "collapsed": true
141 | },
142 | "outputs": [],
143 | "source": [
144 | "sess = tf.InteractiveSession()\n",
145 | "tf.global_variables_initializer().run()"
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": null,
151 | "metadata": {
152 | "collapsed": false
153 | },
154 | "outputs": [],
155 | "source": [
156 | "def prepare_img(x_t):\n",
157 | " x_t = tf.image.rgb_to_grayscale(x_t)\n",
158 | " x_t = tf.image.resize_images(x_t,[80,80])\n",
159 | " return x_t.eval()\n",
160 | "\n",
161 | "########### TEST Play ################\n",
162 | "# get the first state by doing nothing and preprocess the image to 80x80x4\n",
163 | "do_nothing = np.zeros(ACTIONS)\n",
164 | "do_nothing[0] = 1\n",
165 | "x_t, r_0, terminal = game_state.frame_step(do_nothing)\n",
166 | "x_t = prepare_img(x_t)\n",
167 | "######################################\n",
168 | "# show image\n",
169 | "imtest = np.array(x_t)\n",
170 | "plt.imshow(np.reshape(x_t,[80,80]).T,cmap=cm.Greys)\n",
171 | "plt.show()\n",
172 | "#####################################\n",
173 | "s_t = np.stack((x_t, x_t, x_t, x_t), axis=2).reshape([80,80,4])\n",
174 | "print(s_t.shape)\n",
175 | "############# TEST FEEDING ##########\n",
176 | "read = readout.eval(feed_dict={s : [s_t]})[0]\n",
177 | "print(\"Test readout : \")\n",
178 | "print(read)\n",
179 | "a_t = np.zeros([ACTIONS])\n",
180 | "a_t[0] = 1\n",
181 | "print(\"Test cost : %.4f\" % cost.eval(feed_dict={s : [s_t], a : [a_t], y: a_t}))\n",
182 | "#####################################\n"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": null,
188 | "metadata": {
189 | "collapsed": true
190 | },
191 | "outputs": [],
192 | "source": [
193 | "def get_stage(s_t,a_t):\n",
194 | " # run the selected action and observe next state and reward\n",
195 | " x_t1_colored, r_t, terminal = game_state.frame_step(a_t)\n",
196 | " x_t1 = prepare_img(x_t1_colored)\n",
197 | " x_t1 = np.reshape(x_t1, (80, 80, 1)) \n",
198 | " #s_t1 = np.append(x_t1, s_t[:,:,1:], axis = 2)\n",
199 | " s_t1 = np.append(x_t1, s_t[:, :, :3], axis=2)\n",
200 | " return s_t1, r_t, terminal"
201 | ]
202 | },
203 | {
204 | "cell_type": "code",
205 | "execution_count": null,
206 | "metadata": {
207 | "collapsed": true
208 | },
209 | "outputs": [],
210 | "source": [
211 | "def play_action(epsilon,readout_t):\n",
212 | " a_t = np.zeros([ACTIONS])\n",
213 | " action_index = 0\n",
214 | " if random.random() <= epsilon:\n",
215 | " print(\"----------Random Action----------\")\n",
216 | " action_index = random.randrange(ACTIONS)\n",
217 | "        a_t[action_index] = 1\n",
218 | " else:\n",
219 | " action_index = np.argmax(readout_t)\n",
220 | " a_t[action_index] = 1\n",
221 | " #maybe skip some frame with a_t[0] = 1 # do nothing\n",
222 | " return a_t,action_index"
223 | ]
224 | },
225 | {
226 | "cell_type": "code",
227 | "execution_count": null,
228 | "metadata": {
229 | "collapsed": false
230 | },
231 | "outputs": [],
232 | "source": [
233 | "# start training\n",
234 | "epsilon = INITIAL_EPSILON\n",
235 | "rp = 0\n",
236 | "while len(D) < REPLAY_MEMORY:\n",
237 | " \n",
238 | " # choose an action epsilon greedily\n",
239 | " readout_t = readout.eval(feed_dict={s : [s_t]})[0]\n",
240 | " a_t,action_index = play_action(epsilon,readout_t)\n",
241 | " \n",
242 | " ######### play ! ########\n",
243 | " s_t1,r_t,terminal = get_stage(s_t,a_t)\n",
244 | " \n",
245 | " # store the transition in D\n",
246 | " D.append((s_t, a_t, r_t, s_t1, terminal))\n",
247 | " \n",
248 | " s_t = s_t1\n",
249 | " rp += 1\n",
250 | " if(rp % 100 == 0):\n",
251 | " print(\"TRY PLAY and RECORD : %d max readout %.4f\" % (rp,np.max(readout_t)))"
252 | ]
253 | },
254 | {
255 | "cell_type": "code",
256 | "execution_count": null,
257 | "metadata": {
258 | "collapsed": false
259 | },
260 | "outputs": [],
261 | "source": [
262 | "# start training\n",
263 | "# choose an action epsilon greedily\n",
264 | "readout_t = readout.eval(feed_dict={s : [s_t]})[0]\n",
265 | "a_t,action_index = play_action(epsilon,readout_t)\n",
266 | "\n",
267 | "# scale down epsilon\n",
268 | "if epsilon > FINAL_EPSILON:\n",
269 | " epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE\n",
270 | "\n",
271 | "# run the selected action and observe next state and reward\n",
272 | "s_t1,r_t,terminal = get_stage(s_t,a_t)\n",
273 | "# store the transition in D\n",
274 | "D.append((s_t, a_t, r_t, s_t1, terminal))\n",
275 | "\n",
276 | "if len(D) > REPLAY_MEMORY:\n",
277 | " D.popleft()\n",
278 | "# sample a minibatch to train on\n",
279 | "minibatch = random.sample(D, BATCH)\n",
280 | "\n",
281 | "# get the batch variables\n",
282 | "s_j_batch = [d[0] for d in minibatch]\n",
283 | "a_batch = [d[1] for d in minibatch]\n",
284 | "r_batch = [d[2] for d in minibatch]\n",
285 | "s_j1_batch = [d[3] for d in minibatch]\n",
286 | "\n",
287 | "y_batch = []\n",
288 | "readout_j1_batch = readout.eval(feed_dict = {s : s_j1_batch})\n",
289 | "for i in range(0, len(minibatch)):\n",
290 | " terminal = minibatch[i][4]\n",
291 | " # if terminal, only equals reward\n",
292 | " if terminal:\n",
293 | " y_batch.append(r_batch[i])\n",
294 | " else:\n",
295 | " y_batch.append(r_batch[i] + GAMMA * np.max(readout_j1_batch[i]))\n",
296 | "\n",
297 | "# perform gradient step\n",
298 | "train_step.run(feed_dict = {\n",
299 | " y : y_batch,\n",
300 | " a : a_batch,\n",
301 | " s : s_j_batch}\n",
302 | ")\n",
303 | "\n",
304 | "# update the old values\n",
305 | "s_t = s_t1\n",
306 | "rp += 1\n",
307 | "\n",
308 | "# save progress every 10000 iterations\n",
309 | "if rp % 10000 == 0:\n",
310 | " saver.save(sess, 'saved_networks/dqn', global_step = rp)\n",
311 | "if rp % 1 == 0:\n",
312 | " print(\"TIMESTEP\", rp, \"/ STATE explore / EPSILON\", epsilon, \"/ ACTION\", action_index, \"/ REWARD\", r_t, \\\n",
313 | " \"/ Q_MAX %e\" % np.max(readout_t)) "
314 | ]
315 | },
316 | {
317 | "cell_type": "code",
318 | "execution_count": null,
319 | "metadata": {
320 | "collapsed": true
321 | },
322 | "outputs": [],
323 | "source": []
324 | }
325 | ],
326 | "metadata": {
327 | "kernelspec": {
328 | "display_name": "Tensorflow 3",
329 | "language": "python",
330 | "name": "tensorflow"
331 | },
332 | "language_info": {
333 | "codemirror_mode": {
334 | "name": "ipython",
335 | "version": 3
336 | },
337 | "file_extension": ".py",
338 | "mimetype": "text/x-python",
339 | "name": "python",
340 | "nbconvert_exporter": "python",
341 | "pygments_lexer": "ipython3",
342 | "version": "3.5.3"
343 | }
344 | },
345 | "nbformat": 4,
346 | "nbformat_minor": 2
347 | }
348 |
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/LAB09_Deep_Q_Learning_fin.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "#!/usr/bin/env python\n",
12 | "from __future__ import print_function\n",
13 | "\n",
14 | "import tensorflow as tf\n",
15 | "import sys\n",
16 | "sys.path.append(\"game/\")\n",
17 | "import wrapped_flappy_bird as game\n",
18 | "import random\n",
19 | "import numpy as np\n",
20 | "from collections import deque\n",
21 | "import matplotlib.pyplot as plt\n",
22 | "import matplotlib.cm as cm\n",
23 | "\n",
24 | "ACTIONS = 2 # number of valid actions\n",
25 | "GAMMA = 0.99 # decay rate of past observations\n",
26 | "OBSERVE = 1000000. # timesteps to observe before training\n",
27 | "EXPLORE = 2000000. # frames over which to anneal epsilon\n",
28 | "FINAL_EPSILON = 0.0001 # final value of epsilon\n",
29 | "INITIAL_EPSILON = 0.0001 # starting value of epsilon\n",
30 | "REPLAY_MEMORY = 1000 # number of previous transitions to remember\n",
31 | "BATCH = 128 # size of minibatch\n"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": null,
37 | "metadata": {
38 | "collapsed": true
39 | },
40 | "outputs": [],
41 | "source": [
42 | "def weight_variable(shape):\n",
43 | " initial = tf.truncated_normal(shape, stddev = 0.01)\n",
44 | " return tf.Variable(initial)\n",
45 | "\n",
46 | "def bias_variable(shape):\n",
47 | " initial = tf.constant(0.01, shape = shape)\n",
48 | " return tf.Variable(initial)\n",
49 | "\n",
50 | "def conv2d(x, W, stride):\n",
51 | " return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = \"SAME\")\n",
52 | "\n",
53 | "def max_pool_2x2(x):\n",
54 | " return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = \"SAME\")"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": null,
60 | "metadata": {
61 | "collapsed": true
62 | },
63 | "outputs": [],
64 | "source": [
65 | "#create model\n",
66 | "W_conv1 = weight_variable([8, 8, 4, 32])\n",
67 | "b_conv1 = bias_variable([32])\n",
68 | "\n",
69 | "W_conv2 = weight_variable([4, 4, 32, 64])\n",
70 | "b_conv2 = bias_variable([64])\n",
71 | "\n",
72 | "W_conv3 = weight_variable([3, 3, 64, 64])\n",
73 | "b_conv3 = bias_variable([64])\n",
74 | "\n",
75 | "W_fc1 = weight_variable([1600, 512])\n",
76 | "b_fc1 = bias_variable([512])\n",
77 | "\n",
78 | "W_fc2 = weight_variable([512, ACTIONS])\n",
79 | "b_fc2 = bias_variable([ACTIONS])\n",
80 | "\n",
81 | "\n",
82 | "# input layer\n",
83 | "s = tf.placeholder(\"float\", [None, 80, 80, 4])\n",
84 | "\n",
85 | "# hidden layers\n",
86 | "h_conv1 = tf.nn.relu(conv2d(s, W_conv1, 4) + b_conv1) ### ===> 80x80x4 conv 4 ==> 20x20x32\n",
87 | "h_pool1 = max_pool_2x2(h_conv1) ### ===> 20x20x32 maxpool => 10x10x32\n",
88 | "\n",
89 | "h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2) ###=====10x10 conv2 ===>5x5x64\n",
90 | "#h_pool2 = max_pool_2x2(h_conv2)\n",
91 | "\n",
92 | "h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3) ## 5x5 conv 1 padsame =>5x5x64\n",
93 | "#h_pool3 = max_pool_2x2(h_conv3)\n",
94 | "\n",
95 | "#h_pool3_flat = tf.reshape(h_pool3, [-1, 256]) \n",
96 | "h_conv3_flat = tf.reshape(h_conv3, [-1, 1600]) ##5x5x64 flatten =>1600\n",
97 | "\n",
98 | "h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n",
99 | "\n",
100 | "# readout layer\n",
101 | "readout = tf.matmul(h_fc1, W_fc2) + b_fc2"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "metadata": {
108 | "collapsed": false
109 | },
110 | "outputs": [],
111 | "source": [
112 | "# define the cost function\n",
113 | "a = tf.placeholder(\"float\", [None, ACTIONS])\n",
114 | "y = tf.placeholder(\"float\", [None])\n",
115 | "\n",
116 | "readout_action = tf.reduce_sum(tf.multiply(readout, a), reduction_indices=1)\n",
117 | "cost = tf.reduce_mean(tf.square(y - readout_action)) #rms root mean square for cost function\n",
118 | "train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)\n",
119 | "\n",
120 | "# open up a game state to communicate with emulator\n",
121 | "game_state = game.GameState()\n",
122 | "\n",
123 | "sess = tf.InteractiveSession()\n",
124 | "tf.global_variables_initializer().run()\n",
125 | "\n",
126 | "# saving and loading networks\n",
127 | "saver = tf.train.Saver()\n",
128 | "checkpoint = tf.train.get_checkpoint_state(\"saved_networks\")\n",
129 | "if checkpoint and checkpoint.model_checkpoint_path:\n",
130 | " saver.restore(sess, checkpoint.model_checkpoint_path)\n",
131 | " print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n",
132 | "else:\n",
133 | " print(\"Could not find old network weights\")\n",
134 | "\n",
135 | "# store the previous observations in replay memory\n",
136 | "D = deque()"
137 | ]
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": null,
142 | "metadata": {
143 | "collapsed": false
144 | },
145 | "outputs": [],
146 | "source": [
147 | "def prepare_img(x_t):\n",
148 | " x_t = tf.image.rgb_to_grayscale(x_t)\n",
149 | " x_t = tf.image.resize_images(x_t,[80,80])\n",
150 | " return x_t.eval()\n",
151 | "\n",
152 | "########### TEST Play ################\n",
153 | "# get the first state by doing nothing and preprocess the image to 80x80x4\n",
154 | "do_nothing = np.zeros(ACTIONS)\n",
155 | "do_nothing[0] = 1\n",
156 | "x_t, r_0, terminal = game_state.frame_step(do_nothing)\n",
157 | "x_t = prepare_img(x_t)\n",
158 | "######################################\n",
159 | "# show image\n",
160 | "imtest = np.array(x_t)\n",
161 | "plt.imshow(np.reshape(x_t,[80,80]).T,cmap=cm.Greys)\n",
162 | "plt.show()\n",
163 | "#####################################\n",
164 | "s_t = np.stack((x_t, x_t, x_t, x_t), axis=2).reshape([80,80,4])\n",
165 | "print(s_t.shape)\n",
166 | "############# TEST FEEDING ##########\n",
167 | "read = readout.eval(feed_dict={s : [s_t]})[0]\n",
168 | "print(\"Test readout : \")\n",
169 | "print(read)\n",
170 | "a_t = np.zeros([ACTIONS])\n",
171 | "a_t[0] = 1\n",
172 | "print(\"Test cost : %.4f\" % cost.eval(feed_dict={s : [s_t], a : [a_t], y: a_t}))\n",
173 | "#####################################\n"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "metadata": {
180 | "collapsed": true
181 | },
182 | "outputs": [],
183 | "source": [
184 | "def get_stage(s_t,a_t):\n",
185 | " # run the selected action and observe next state and reward\n",
186 | " x_t1_colored, r_t, terminal = game_state.frame_step(a_t)\n",
187 | " x_t1 = prepare_img(x_t1_colored)\n",
188 | " x_t1 = np.reshape(x_t1, (80, 80, 1)) \n",
189 | " #s_t1 = np.append(x_t1, s_t[:,:,1:], axis = 2)\n",
190 | " s_t1 = np.append(x_t1, s_t[:, :, :3], axis=2)\n",
191 | " return s_t1, r_t, terminal"
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": null,
197 | "metadata": {
198 | "collapsed": true
199 | },
200 | "outputs": [],
201 | "source": [
202 | "def play_action(epsilon,readout_t):\n",
203 | " a_t = np.zeros([ACTIONS])\n",
204 | " action_index = 0\n",
205 | " if random.random() <= epsilon:\n",
206 | " print(\"----------Random Action----------\")\n",
207 | " action_index = random.randrange(ACTIONS)\n",
208 | "        a_t[action_index] = 1\n",
209 | " else:\n",
210 | " action_index = np.argmax(readout_t)\n",
211 | " a_t[action_index] = 1\n",
212 | " #maybe skip some frame with a_t[0] = 1 # do nothing\n",
213 | " return a_t,action_index"
214 | ]
215 | },
216 | {
217 | "cell_type": "code",
218 | "execution_count": null,
219 | "metadata": {
220 | "collapsed": false
221 | },
222 | "outputs": [],
223 | "source": [
224 | "# start training\n",
225 | "epsilon = INITIAL_EPSILON\n",
226 | "rp = 0\n",
227 | "while len(D) < REPLAY_MEMORY:\n",
228 | " \n",
229 | " # choose an action epsilon greedily\n",
230 | " readout_t = readout.eval(feed_dict={s : [s_t]})[0]\n",
231 | " a_t,action_index = play_action(epsilon,readout_t)\n",
232 | " \n",
233 | " ######### play ! ########\n",
234 | " s_t1,r_t,terminal = get_stage(s_t,a_t)\n",
235 | " \n",
236 | " # store the transition in D\n",
237 | " #D.append((s_t, a_t, r_t, s_t1, terminal))\n",
238 | " \n",
239 | " s_t = s_t1\n",
240 | " rp += 1\n",
241 | " if(rp % 100 == 0):\n",
242 | " print(\"TRY PLAY and RECORD : %d max readout %.4f\" % (rp,np.max(readout_t)))"
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "execution_count": null,
248 | "metadata": {
249 | "collapsed": true
250 | },
251 | "outputs": [],
252 | "source": []
253 | }
254 | ],
255 | "metadata": {
256 | "kernelspec": {
257 | "display_name": "Tensorflow 3",
258 | "language": "python",
259 | "name": "tensorflow"
260 | },
261 | "language_info": {
262 | "codemirror_mode": {
263 | "name": "ipython",
264 | "version": 3
265 | },
266 | "file_extension": ".py",
267 | "mimetype": "text/x-python",
268 | "name": "python",
269 | "nbconvert_exporter": "python",
270 | "pygments_lexer": "ipython3",
271 | "version": "3.5.3"
272 | }
273 | },
274 | "nbformat": 4,
275 | "nbformat_minor": 2
276 | }
277 |
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/die.ogg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/die.ogg
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/die.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/die.wav
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/hit.ogg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/hit.ogg
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/hit.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/hit.wav
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/point.ogg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/point.ogg
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/point.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/point.wav
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/swoosh.ogg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/swoosh.ogg
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/swoosh.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/swoosh.wav
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/wing.ogg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/wing.ogg
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/audio/wing.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/audio/wing.wav
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/0.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/1.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/2.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/3.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/4.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/5.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/6.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/7.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/8.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/9.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/background-black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/background-black.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/base.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/base.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/pipe-green.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/pipe-green.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/redbird-downflap.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/redbird-downflap.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/redbird-midflap.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/redbird-midflap.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/assets/sprites/redbird-upflap.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/assets/sprites/redbird-upflap.png
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/deep_q_network.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from __future__ import print_function
3 |
4 | import tensorflow as tf
5 | import sys
6 | sys.path.append("game/")
7 | import wrapped_flappy_bird as game
8 | import random
9 | import numpy as np
10 | from collections import deque
11 |
12 | GAME = 'bird' # the name of the game being played for log files
13 | ACTIONS = 2 # number of valid actions
14 | GAMMA = 0.99 # decay rate of past observations
15 | OBSERVE = 1000. # timesteps to observe before training
16 | EXPLORE = 2000000. # frames over which to anneal epsilon
17 | FINAL_EPSILON = 0.0001 # final value of epsilon
18 | INITIAL_EPSILON = 0.1 # starting value of epsilon
19 | REPLAY_MEMORY = 50000 # number of previous transitions to remember
20 | BATCH = 32 # size of minibatch
21 | FRAME_PER_ACTION = 1
22 |
def weight_variable(shape):
    """Create a trainable weight tensor drawn from a truncated normal (stddev 0.01)."""
    init = tf.truncated_normal(shape, stddev=0.01)
    return tf.Variable(init)
26 |
def bias_variable(shape):
    """Create a trainable bias tensor filled with the constant 0.01."""
    return tf.Variable(tf.constant(0.01, shape=shape))
30 |
def conv2d(x, W, stride):
    """2-D convolution with SAME padding and the same stride in both spatial dims."""
    strides = [1, stride, stride, 1]
    return tf.nn.conv2d(x, W, strides=strides, padding="SAME")
33 |
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")
36 |
def createNetwork():
    """Build the DQN convolutional network.

    Input is an 80x80x4 stack of grayscale frames. Returns (s, readout, h_fc1):
      s       -- input placeholder, shape [None, 80, 80, 4]
      readout -- per-action Q-values (linear output), shape [None, ACTIONS]
      h_fc1   -- 512-unit fully-connected hidden layer (exposed for logging)
    """
    # network weights
    W_conv1 = weight_variable([8, 8, 4, 32])
    b_conv1 = bias_variable([32])

    W_conv2 = weight_variable([4, 4, 32, 64])
    b_conv2 = bias_variable([64])

    W_conv3 = weight_variable([3, 3, 64, 64])
    b_conv3 = bias_variable([64])

    # 1600 = 5*5*64: spatial size goes 80 -> 20 (conv stride 4) -> 10 (pool)
    # -> 5 (conv stride 2) -> 5 (conv stride 1)
    W_fc1 = weight_variable([1600, 512])
    b_fc1 = bias_variable([512])

    W_fc2 = weight_variable([512, ACTIONS])
    b_fc2 = bias_variable([ACTIONS])

    # input layer
    s = tf.placeholder("float", [None, 80, 80, 4])

    # hidden layers
    h_conv1 = tf.nn.relu(conv2d(s, W_conv1, 4) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2)
    #h_pool2 = max_pool_2x2(h_conv2)

    h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3)
    #h_pool3 = max_pool_2x2(h_conv3)

    #h_pool3_flat = tf.reshape(h_pool3, [-1, 256])
    h_conv3_flat = tf.reshape(h_conv3, [-1, 1600])

    h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)

    # readout layer: one linear Q-value per action
    readout = tf.matmul(h_fc1, W_fc2) + b_fc2

    return s, readout, h_fc1
76 |
def prepare_img(x_t):
    """Convert one RGB game frame into an 80x80x1 grayscale numpy array.

    Requires an active default TF session (uses Tensor.eval()).
    NOTE(review): this builds new graph ops (rgb_to_grayscale/resize_images)
    on every call, so the default graph grows each frame; consider building
    these ops once against a placeholder — confirm before changing.
    """
    x_t = tf.image.rgb_to_grayscale(x_t)
    x_t = tf.image.resize_images(x_t,[80,80])
    return x_t.eval()
81 |
def trainNetwork(s, readout, h_fc1, sess):
    """Train the Q-network by playing Flappy Bird forever.

    Plays with an epsilon-greedy policy, stores (state, action, reward,
    next_state, terminal) transitions in a bounded replay memory, and after
    OBSERVE timesteps performs one minibatch Q-learning update per frame.
    Checkpoints are written every 10000 steps. Never returns.

    s       -- input placeholder, shape [None, 80, 80, 4]
    readout -- per-action Q-value tensor, shape [None, ACTIONS]
    h_fc1   -- final hidden layer (only referenced by commented-out logging)
    sess    -- active TensorFlow session
    """
    # cost: mean squared TD error on the Q-value of the action actually taken
    a = tf.placeholder("float", [None, ACTIONS])
    y = tf.placeholder("float", [None])
    readout_action = tf.reduce_sum(tf.multiply(readout, a), reduction_indices=1)
    cost = tf.reduce_mean(tf.square(y - readout_action))
    train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)

    # open up a game state to communicate with emulator
    game_state = game.GameState()

    # store the previous observations in replay memory
    D = deque()

    # log files (the code that writes to them is currently commented out below)
    a_file = open("logs_" + GAME + "/readout.txt", 'w')
    h_file = open("logs_" + GAME + "/hidden.txt", 'w')

    # get the first state by doing nothing and preprocess the image to 80x80x4
    do_nothing = np.zeros(ACTIONS)
    do_nothing[0] = 1
    x_t, r_0, terminal = game_state.frame_step(do_nothing)
    x_t = prepare_img(x_t)  # (80, 80, 1) grayscale frame
    # BUGFIX: stacking four (80, 80, 1) frames on axis=2 yields (80, 80, 4, 1);
    # the old `[0]` indexing produced an (80, 4, 1) array instead of the
    # (80, 80, 4) state the network expects. Reshape explicitly instead.
    s_t = np.stack((x_t, x_t, x_t, x_t), axis=2).reshape((80, 80, 4))

    # saving and loading networks
    saver = tf.train.Saver()
    sess.run(tf.initialize_all_variables())
    checkpoint = tf.train.get_checkpoint_state("saved_networks")
    if checkpoint and checkpoint.model_checkpoint_path:
        saver.restore(sess, checkpoint.model_checkpoint_path)
        print("Successfully loaded:", checkpoint.model_checkpoint_path)
    else:
        print("Could not find old network weights")

    # start training
    epsilon = INITIAL_EPSILON
    t = 0
    while "flappy bird" != "angry bird":  # loop forever
        # choose an action epsilon greedily
        readout_t = readout.eval(feed_dict={s : [s_t]})[0]
        a_t = np.zeros([ACTIONS])
        action_index = 0
        if t % FRAME_PER_ACTION == 0:
            if random.random() <= epsilon:
                print("----------Random Action----------")
                action_index = random.randrange(ACTIONS)
                # BUGFIX: the old code drew randrange() a second time here, so
                # the executed action could differ from the logged action_index.
                a_t[action_index] = 1
            else:
                action_index = np.argmax(readout_t)
                a_t[action_index] = 1
        else:
            a_t[0] = 1 # do nothing

        # scale down epsilon linearly once observation is over
        if epsilon > FINAL_EPSILON and t > OBSERVE:
            epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE

        # run the selected action and observe next state and reward
        x_t1_colored, r_t, terminal = game_state.frame_step(a_t)
        # BUGFIX: the old code re-processed the stale frame (`prepare_img(x_t)`)
        # instead of the newly observed one, so the state never advanced.
        x_t1 = np.reshape(prepare_img(x_t1_colored), (80, 80, 1))
        # newest frame first, followed by the three most recent old frames
        s_t1 = np.append(x_t1, s_t[:, :, :3], axis=2)

        # store the transition in the bounded replay memory
        D.append((s_t, a_t, r_t, s_t1, terminal))
        if len(D) > REPLAY_MEMORY:
            D.popleft()

        # only train if done observing
        if t > OBSERVE:
            # sample a minibatch to train on
            minibatch = random.sample(D, BATCH)

            # get the batch variables
            s_j_batch = [d[0] for d in minibatch]
            a_batch = [d[1] for d in minibatch]
            r_batch = [d[2] for d in minibatch]
            s_j1_batch = [d[3] for d in minibatch]

            # Bellman targets: r for terminal states, r + gamma * max Q otherwise
            y_batch = []
            readout_j1_batch = readout.eval(feed_dict = {s : s_j1_batch})
            for i in range(0, len(minibatch)):
                terminal = minibatch[i][4]
                # if terminal, only equals reward
                if terminal:
                    y_batch.append(r_batch[i])
                else:
                    y_batch.append(r_batch[i] + GAMMA * np.max(readout_j1_batch[i]))

            # perform gradient step
            train_step.run(feed_dict = {
                y : y_batch,
                a : a_batch,
                s : s_j_batch}
            )

        # update the old values
        s_t = s_t1
        t += 1

        # save progress every 10000 iterations
        if t % 10000 == 0:
            saver.save(sess, 'saved_networks/' + GAME + '-dqn', global_step = t)

        # print info
        state = ""
        if t <= OBSERVE:
            state = "observe"
        elif t > OBSERVE and t <= OBSERVE + EXPLORE:
            state = "explore"
        else:
            state = "train"

        print("TIMESTEP", t, "/ STATE", state, \
            "/ EPSILON", epsilon, "/ ACTION", action_index, "/ REWARD", r_t, \
            "/ Q_MAX %e" % np.max(readout_t))
        # write info to files
        '''
        if t % 10000 <= 100:
            a_file.write(",".join([str(x) for x in readout_t]) + '\n')
            h_file.write(",".join([str(x) for x in h_fc1.eval(feed_dict={s:[s_t]})[0]]) + '\n')
            cv2.imwrite("logs_tetris/frame" + str(t) + ".png", x_t1)
        '''
206 |
def playGame():
    """Build the Q-network in an interactive session and run the training loop."""
    session = tf.InteractiveSession()
    net_input, q_values, hidden = createNetwork()
    trainNetwork(net_input, q_values, hidden, session)
211 |
def main():
    # Script entry point: hand off to the game/training driver.
    playGame()

if __name__ == "__main__":
    main()
217 |
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/game/__pycache__/flappy_bird_utils.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/game/__pycache__/flappy_bird_utils.cpython-35.pyc
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/game/__pycache__/wrapped_flappy_bird.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/game/__pycache__/wrapped_flappy_bird.cpython-35.pyc
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/game/flappy_bird_utils.py:
--------------------------------------------------------------------------------
1 | import pygame
2 | import sys
def load():
    """Load and return the (IMAGES, SOUNDS, HITMASKS) asset dicts.

    IMAGES   -- pygame surfaces keyed 'numbers', 'base', 'background', 'player', 'pipe'
    SOUNDS   -- pygame Sound objects keyed 'die', 'hit', 'point', 'swoosh', 'wing'
    HITMASKS -- per-pixel alpha boolean masks for 'pipe' and 'player' sprites
    Requires a pygame display to exist (convert()/convert_alpha() need one).
    """
    # path of player with different states
    PLAYER_PATH = (
        'assets/sprites/redbird-upflap.png',
        'assets/sprites/redbird-midflap.png',
        'assets/sprites/redbird-downflap.png'
    )

    # path of background
    BACKGROUND_PATH = 'assets/sprites/background-black.png'

    # path of pipe
    PIPE_PATH = 'assets/sprites/pipe-green.png'

    IMAGES, SOUNDS, HITMASKS = {}, {}, {}

    # numbers sprites for score display
    IMAGES['numbers'] = (
        pygame.image.load('assets/sprites/0.png').convert_alpha(),
        pygame.image.load('assets/sprites/1.png').convert_alpha(),
        pygame.image.load('assets/sprites/2.png').convert_alpha(),
        pygame.image.load('assets/sprites/3.png').convert_alpha(),
        pygame.image.load('assets/sprites/4.png').convert_alpha(),
        pygame.image.load('assets/sprites/5.png').convert_alpha(),
        pygame.image.load('assets/sprites/6.png').convert_alpha(),
        pygame.image.load('assets/sprites/7.png').convert_alpha(),
        pygame.image.load('assets/sprites/8.png').convert_alpha(),
        pygame.image.load('assets/sprites/9.png').convert_alpha()
    )

    # base (ground) sprite
    IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()

    # sounds: .wav on Windows, .ogg elsewhere
    # NOTE(review): 'win' in sys.platform also matches 'darwin' (macOS), which
    # therefore gets .wav too — confirm whether that is intended.
    if 'win' in sys.platform:
        soundExt = '.wav'
    else:
        soundExt = '.ogg'

    SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)
    SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)
    SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)
    SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)
    SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)

    # background sprite (a single fixed image, despite the variable name)
    IMAGES['background'] = pygame.image.load(BACKGROUND_PATH).convert()

    # player sprites: up-, mid- and down-flap frames
    IMAGES['player'] = (
        pygame.image.load(PLAYER_PATH[0]).convert_alpha(),
        pygame.image.load(PLAYER_PATH[1]).convert_alpha(),
        pygame.image.load(PLAYER_PATH[2]).convert_alpha(),
    )

    # pipe sprites: rotated copy for the upper pipe, original for the lower
    IMAGES['pipe'] = (
        pygame.transform.rotate(
            pygame.image.load(PIPE_PATH).convert_alpha(), 180),
        pygame.image.load(PIPE_PATH).convert_alpha(),
    )

    # hitmask for pipes
    HITMASKS['pipe'] = (
        getHitmask(IMAGES['pipe'][0]),
        getHitmask(IMAGES['pipe'][1]),
    )

    # hitmask for player
    HITMASKS['player'] = (
        getHitmask(IMAGES['player'][0]),
        getHitmask(IMAGES['player'][1]),
        getHitmask(IMAGES['player'][2]),
    )

    return IMAGES, SOUNDS, HITMASKS
79 |
def getHitmask(image):
    """Return a hitmask (column-major list of lists) from an image's alpha.

    mask[x][y] is True where the pixel at (x, y) has nonzero alpha.
    """
    width = image.get_width()
    height = image.get_height()
    return [[bool(image.get_at((x, y))[3]) for y in range(height)]
            for x in range(width)]
88 |
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/game/wrapped_flappy_bird.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import sys
3 | import random
4 | import pygame
5 | import flappy_bird_utils
6 | import pygame.surfarray as surfarray
7 | from pygame.locals import *
8 | from itertools import cycle
9 |
# Game/window configuration
FPS = 30
SCREENWIDTH = 288
SCREENHEIGHT = 512

# Module-level pygame setup: runs at import time and opens the game window.
pygame.init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')

# Load all sprite/sound/hitmask assets (needs the display created above).
IMAGES, SOUNDS, HITMASKS = flappy_bird_utils.load()
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79  # y coordinate of the ground line

# Sprite dimensions, read once from the loaded images
PLAYER_WIDTH = IMAGES['player'][0].get_width()
PLAYER_HEIGHT = IMAGES['player'][0].get_height()
PIPE_WIDTH = IMAGES['pipe'][0].get_width()
PIPE_HEIGHT = IMAGES['pipe'][0].get_height()
BACKGROUND_WIDTH = IMAGES['background'].get_width()

# endless wing-flap animation sequence (up, mid, down, mid)
PLAYER_INDEX_GEN = cycle([0, 1, 2, 1])
30 |
31 |
class GameState:
    """One Flappy Bird game driven externally one frame at a time via
    frame_step(); resets itself in place when the bird crashes."""
    def __init__(self):
        # score, wing-animation frame index, and animation loop counter
        self.score = self.playerIndex = self.loopIter = 0
        self.playerx = int(SCREENWIDTH * 0.2)
        self.playery = int((SCREENHEIGHT - PLAYER_HEIGHT) / 2)
        self.basex = 0
        self.baseShift = IMAGES['base'].get_width() - BACKGROUND_WIDTH

        # start with two pipe pairs, half a screen apart
        newPipe1 = getRandomPipe()
        newPipe2 = getRandomPipe()
        self.upperPipes = [
            {'x': SCREENWIDTH, 'y': newPipe1[0]['y']},
            {'x': SCREENWIDTH + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
        ]
        self.lowerPipes = [
            {'x': SCREENWIDTH, 'y': newPipe1[1]['y']},
            {'x': SCREENWIDTH + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
        ]

        # player velocity, max velocity, downward accleration, accleration on flap
        self.pipeVelX = -4
        self.playerVelY = 0 # player's velocity along Y, default same as playerFlapped
        self.playerMaxVelY = 10 # max vel along Y, max descend speed
        self.playerMinVelY = -8 # min vel along Y, max ascend speed
        self.playerAccY = 1 # players downward accleration
        self.playerFlapAcc = -9 # players speed on flapping
        self.playerFlapped = False # True when player flaps

    def frame_step(self, input_actions):
        """Advance the game by one frame.

        input_actions -- one-hot pair: [1, 0] = do nothing, [0, 1] = flap.
        Returns (image_data, reward, terminal):
          image_data -- raw RGB screen from pygame.surfarray.array3d
          reward     -- 0.1 per surviving frame, 1 when passing a pipe, -1 on crash
          terminal   -- True when the bird crashed (state is reset in place)
        Raises ValueError if input_actions is not one-hot.
        """
        pygame.event.pump()

        reward = 0.1
        terminal = False

        if sum(input_actions) != 1:
            raise ValueError('Multiple input actions!')

        # input_actions[0] == 1: do nothing
        # input_actions[1] == 1: flap the bird
        if input_actions[1] == 1:
            if self.playery > -2 * PLAYER_HEIGHT:
                self.playerVelY = self.playerFlapAcc
                self.playerFlapped = True
                #SOUNDS['wing'].play()

        # check for score: scored when the player's midpoint crosses a pipe's midpoint
        playerMidPos = self.playerx + PLAYER_WIDTH / 2
        for pipe in self.upperPipes:
            pipeMidPos = pipe['x'] + PIPE_WIDTH / 2
            if pipeMidPos <= playerMidPos < pipeMidPos + 4:
                self.score += 1
                #SOUNDS['point'].play()
                reward = 1

        # playerIndex basex change: wing animation (every 3rd frame) and scrolling ground
        if (self.loopIter + 1) % 3 == 0:
            self.playerIndex = next(PLAYER_INDEX_GEN)
        self.loopIter = (self.loopIter + 1) % 30
        self.basex = -((-self.basex + 100) % self.baseShift)

        # player's movement: apply gravity unless a flap impulse was just set
        if self.playerVelY < self.playerMaxVelY and not self.playerFlapped:
            self.playerVelY += self.playerAccY
        if self.playerFlapped:
            self.playerFlapped = False
        # clamp descent so the player never goes below the ground, nor above y=0
        self.playery += min(self.playerVelY, BASEY - self.playery - PLAYER_HEIGHT)
        if self.playery < 0:
            self.playery = 0

        # move pipes to left
        for uPipe, lPipe in zip(self.upperPipes, self.lowerPipes):
            uPipe['x'] += self.pipeVelX
            lPipe['x'] += self.pipeVelX

        # add new pipe when first pipe is about to touch left of screen
        if 0 < self.upperPipes[0]['x'] < 5:
            newPipe = getRandomPipe()
            self.upperPipes.append(newPipe[0])
            self.lowerPipes.append(newPipe[1])

        # remove first pipe if its out of the screen
        if self.upperPipes[0]['x'] < -PIPE_WIDTH:
            self.upperPipes.pop(0)
            self.lowerPipes.pop(0)

        # check if crash here
        isCrash= checkCrash({'x': self.playerx, 'y': self.playery,
                             'index': self.playerIndex},
                            self.upperPipes, self.lowerPipes)
        if isCrash:
            #SOUNDS['hit'].play()
            #SOUNDS['die'].play()
            terminal = True
            # restart the game in place so the caller can keep stepping
            self.__init__()
            reward = -1

        # draw sprites
        SCREEN.blit(IMAGES['background'], (0,0))

        for uPipe, lPipe in zip(self.upperPipes, self.lowerPipes):
            SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
            SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))

        SCREEN.blit(IMAGES['base'], (self.basex, BASEY))
        # print score so player overlaps the score
        # showScore(self.score)
        SCREEN.blit(IMAGES['player'][self.playerIndex],
                    (self.playerx, self.playery))

        # capture the frame BEFORE display.update(), then tick the FPS clock
        image_data = pygame.surfarray.array3d(pygame.display.get_surface())
        pygame.display.update()
        FPSCLOCK.tick(FPS)
        #print self.upperPipes[0]['y'] + PIPE_HEIGHT - int(BASEY * 0.2)
        return image_data, reward, terminal
146 |
def getRandomPipe():
    """Return a randomly generated [upper, lower] pipe pair just off-screen right."""
    # candidate y positions for the top of the gap
    gapYs = [20, 30, 40, 50, 60, 70, 80, 90]
    # randrange(n) consumes the same RNG draw as randint(0, n - 1)
    gapY = gapYs[random.randrange(len(gapYs))] + int(BASEY * 0.2)
    pipeX = SCREENWIDTH + 10

    upper = {'x': pipeX, 'y': gapY - PIPE_HEIGHT}
    lower = {'x': pipeX, 'y': gapY + PIPEGAPSIZE}
    return [upper, lower]
161 |
162 |
def showScore(score):
    """Display the score centered horizontally near the top of the screen."""
    digits = [int(ch) for ch in str(score)]
    totalWidth = sum(IMAGES['numbers'][d].get_width() for d in digits)

    # left edge so the digit run is horizontally centered
    x = (SCREENWIDTH - totalWidth) / 2
    for d in digits:
        sprite = IMAGES['numbers'][d]
        SCREEN.blit(sprite, (x, SCREENHEIGHT * 0.1))
        x += sprite.get_width()
176 |
177 |
def checkCrash(player, upperPipes, lowerPipes):
    """Return True if the player collides with the ground or any pipe.

    Mutates player: fills in 'w' and 'h' from the player sprite.
    """
    pi = player['index']
    player['w'] = IMAGES['player'][0].get_width()
    player['h'] = IMAGES['player'][0].get_height()

    # ground collision
    if player['y'] + player['h'] >= BASEY - 1:
        return True

    playerRect = pygame.Rect(player['x'], player['y'],
                             player['w'], player['h'])

    # hitmasks do not change per pipe, so look them up once
    pHitMask = HITMASKS['player'][pi]
    uHitmask = HITMASKS['pipe'][0]
    lHitmask = HITMASKS['pipe'][1]

    for uPipe, lPipe in zip(upperPipes, lowerPipes):
        uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], PIPE_WIDTH, PIPE_HEIGHT)
        lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], PIPE_WIDTH, PIPE_HEIGHT)

        # pixel-accurate collision against either pipe of this pair
        if pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) or \
           pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask):
            return True

    return False
210 |
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
    """Check whether two objects truly overlap, not just their bounding rects."""
    overlap = rect1.clip(rect2)

    if overlap.width == 0 or overlap.height == 0:
        return False

    # offsets of the overlap region inside each object's hitmask
    ox1, oy1 = overlap.x - rect1.x, overlap.y - rect1.y
    ox2, oy2 = overlap.x - rect2.x, overlap.y - rect2.y

    return any(
        hitmask1[ox1 + dx][oy1 + dy] and hitmask2[ox2 + dx][oy2 + dy]
        for dx in range(overlap.width)
        for dy in range(overlap.height)
    )
226 |
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/logs_bird/hidden.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/logs_bird/hidden.txt
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/logs_bird/readout.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/logs_bird/readout.txt
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/saved_networks/bird-dqn-2450000.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/saved_networks/bird-dqn-2450000.data-00000-of-00001
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/saved_networks/bird-dqn-2450000.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/saved_networks/bird-dqn-2450000.index
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/saved_networks/bird-dqn-2450000.meta:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab09_Deep_Q_Learning/saved_networks/bird-dqn-2450000.meta
--------------------------------------------------------------------------------
/Lab09_Deep_Q_Learning/saved_networks/checkpoint:
--------------------------------------------------------------------------------
1 | model_checkpoint_path: "bird-dqn-2450000"
2 | all_model_checkpoint_paths: "bird-dqn-2410000"
3 | all_model_checkpoint_paths: "bird-dqn-2420000"
4 | all_model_checkpoint_paths: "bird-dqn-2430000"
5 | all_model_checkpoint_paths: "bird-dqn-2440000"
6 | all_model_checkpoint_paths: "bird-dqn-2450000"
7 |
--------------------------------------------------------------------------------
/Lab10_Recurrent_NeuralNetwork/.ipynb_checkpoints/Tumbon-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import, division, print_function\n",
12 | "\n",
13 | "import os\n",
14 | "from six import moves\n",
15 | "import ssl\n",
16 | "\n",
17 | "import tflearn\n",
18 | "from tflearn.data_utils import *"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {
25 | "collapsed": false
26 | },
27 | "outputs": [],
28 | "source": [
29 | "path = \"US_Cities.txt\"\n",
30 | "maxlen = 20\n",
31 | "string_utf8 = open(path, \"r\",encoding='utf-8').read()\n",
32 | "X, Y, char_idx = string_to_semi_redundant_sequences(string_utf8, seq_maxlen=maxlen, redun_step=3)"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {
39 | "collapsed": false
40 | },
41 | "outputs": [],
42 | "source": [
43 | "print(X[0])\n",
44 | "print(Y[0])\n",
45 | "print(char_idx)"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {
52 | "collapsed": false
53 | },
54 | "outputs": [],
55 | "source": [
56 | "g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])\n",
57 | "g = tflearn.lstm(g, 512, return_seq=True)\n",
58 | "g = tflearn.dropout(g, 0.5)\n",
59 | "g = tflearn.lstm(g, 512)\n",
60 | "g = tflearn.dropout(g, 0.5)\n",
61 | "g = tflearn.fully_connected(g, len(char_idx), activation='softmax')\n",
62 | "g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',\n",
63 | " learning_rate=0.001)\n",
64 | "\n",
65 | "m = tflearn.SequenceGenerator(g, dictionary=char_idx,\n",
66 | " seq_maxlen=maxlen,\n",
67 | " clip_gradients=5.0,\n",
68 | " checkpoint_path='model_us_cities')\n"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "metadata": {
75 | "collapsed": false
76 | },
77 | "outputs": [],
78 | "source": [
79 | "seed = random_sequence_from_string(string_utf8, maxlen)\n",
80 | "m.fit(X, Y, validation_set=0.1, batch_size=128,n_epoch=1, run_id='us_cities')\n",
81 | "print(\"-- TESTING...\")\n",
82 | "print(\"-- Test with temperature of 1.2 --\")\n",
83 | "print(m.generate(30, temperature=1.2, seq_seed=seed).encode('utf-8'))\n",
84 | "print(\"-- Test with temperature of 1.0 --\")\n",
85 | "print(m.generate(30, temperature=1.0, seq_seed=seed).encode('utf-8'))\n",
86 | "print(\"-- Test with temperature of 0.5 --\")\n",
87 | "print(m.generate(30, temperature=0.5, seq_seed=seed).encode('utf-8'))"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "metadata": {
94 | "collapsed": false
95 | },
96 | "outputs": [],
97 | "source": [
98 | "print(m.generate(100, temperature=1.0, seq_seed=seed))"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {
105 | "collapsed": true
106 | },
107 | "outputs": [],
108 | "source": []
109 | }
110 | ],
111 | "metadata": {
112 | "kernelspec": {
113 | "display_name": "Tensorflow 3",
114 | "language": "python",
115 | "name": "tensorflow"
116 | },
117 | "language_info": {
118 | "codemirror_mode": {
119 | "name": "ipython",
120 | "version": 3
121 | },
122 | "file_extension": ".py",
123 | "mimetype": "text/x-python",
124 | "name": "python",
125 | "nbconvert_exporter": "python",
126 | "pygments_lexer": "ipython3",
127 | "version": "3.5.3"
128 | }
129 | },
130 | "nbformat": 4,
131 | "nbformat_minor": 2
132 | }
133 |
--------------------------------------------------------------------------------
/Lab10_Recurrent_NeuralNetwork/LABXX_Basic_RNN.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 5,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [
10 | {
11 | "name": "stdout",
12 | "output_type": "stream",
13 | "text": [
14 | "Extracting ../data\\train-images-idx3-ubyte.gz\n",
15 | "Extracting ../data\\train-labels-idx1-ubyte.gz\n",
16 | "Extracting ../data\\t10k-images-idx3-ubyte.gz\n",
17 | "Extracting ../data\\t10k-labels-idx1-ubyte.gz\n"
18 | ]
19 | }
20 | ],
21 | "source": [
22 | "import tensorflow as tf\n",
23 | "from tensorflow.contrib import rnn\n",
24 | "import numpy as np\n",
25 | "\n",
26 | "# Import MNIST data\n",
27 | "from tensorflow.examples.tutorials.mnist import input_data\n",
28 | "mnist = input_data.read_data_sets(\"../data\", one_hot=True)"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": 2,
34 | "metadata": {
35 | "collapsed": true
36 | },
37 | "outputs": [],
38 | "source": [
39 | "# Parameters\n",
40 | "learning_rate = 0.001\n",
41 | "training_iters = 100000\n",
42 | "batch_size = 128\n",
43 | "display_step = 10\n",
44 | "\n",
45 | "# Network Parameters\n",
46 | "n_input = 28 # MNIST data input (img shape: 28*28)\n",
47 | "n_steps = 28 # timesteps\n",
48 | "n_hidden = 128 # hidden layer num of features\n",
49 | "n_classes = 10 # MNIST total classes (0-9 digits)\n",
50 | "\n",
51 | "# tf Graph input\n",
52 | "x = tf.placeholder(\"float\", [None, n_steps, n_input])\n",
53 | "y = tf.placeholder(\"float\", [None, n_classes])\n",
54 | "\n",
55 | "# Define weights\n",
56 | "weights = {\n",
57 | " 'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))\n",
58 | "}\n",
59 | "biases = {\n",
60 | " 'out': tf.Variable(tf.random_normal([n_classes]))\n",
61 | "}"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": 3,
67 | "metadata": {
68 | "collapsed": false
69 | },
70 | "outputs": [],
71 | "source": [
72 | "def RNN(x, weights, biases):\n",
73 | "\n",
74 | "# configuration\n",
75 | "# O * W + b -> 10 labels for each image, O[? 28], W[28 10], B[10]\n",
76 | "# ^ (O: output 28 vec from 28 vec input)\n",
77 | "# |\n",
78 | "# +-+ +-+ +--+\n",
79 | "# |1|->|2|-> ... |28| time_step_size = 28\n",
80 | "# +-+ +-+ +--+\n",
81 | "# ^ ^ ... ^\n",
82 | "# | | |\n",
83 | "# [28] [28] ... [28]\n",
84 | "# [28] [28] ... [28]\n",
85 | "# [28] [28] ... [28]\n",
86 | " \n",
87 | " x = tf.unstack(x, n_steps, 1)\n",
88 | " \n",
89 | " cell = rnn.BasicRNNCell(n_hidden)\n",
90 | " \n",
91 | " outputs, states = rnn.static_rnn(cell, x, dtype=tf.float32)\n",
92 | " # Linear activation, using rnn inner loop last output\n",
93 | " return tf.matmul(outputs[-1], weights['out']) + biases['out']\n",
94 | "\n",
95 | "pred = RNN(x, weights, biases)\n",
96 | "\n",
97 | "# Define loss and optimizer\n",
98 | "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
99 | "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
100 | "\n",
101 | "# Evaluate model\n",
102 | "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
103 | "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
104 | "\n",
105 | "# Initializing the variables\n",
106 | "init = tf.global_variables_initializer()"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 4,
112 | "metadata": {
113 | "collapsed": false
114 | },
115 | "outputs": [
116 | {
117 | "name": "stdout",
118 | "output_type": "stream",
119 | "text": [
120 | "Iter 1280, Minibatch Loss= 2.018440, Training Accuracy= 0.28125\n",
121 | "Iter 2560, Minibatch Loss= 1.724001, Training Accuracy= 0.35156\n",
122 | "Iter 3840, Minibatch Loss= 1.426400, Training Accuracy= 0.50781\n",
123 | "Iter 5120, Minibatch Loss= 1.109406, Training Accuracy= 0.62500\n",
124 | "Iter 6400, Minibatch Loss= 0.859597, Training Accuracy= 0.71875\n",
125 | "Iter 7680, Minibatch Loss= 1.155417, Training Accuracy= 0.57812\n",
126 | "Iter 8960, Minibatch Loss= 0.807782, Training Accuracy= 0.76562\n",
127 | "Iter 10240, Minibatch Loss= 0.681562, Training Accuracy= 0.78125\n",
128 | "Iter 11520, Minibatch Loss= 0.434009, Training Accuracy= 0.84375\n",
129 | "Iter 12800, Minibatch Loss= 0.759220, Training Accuracy= 0.76562\n",
130 | "Iter 14080, Minibatch Loss= 0.554060, Training Accuracy= 0.85938\n",
131 | "Iter 15360, Minibatch Loss= 0.350382, Training Accuracy= 0.87500\n",
132 | "Iter 16640, Minibatch Loss= 0.435002, Training Accuracy= 0.89844\n",
133 | "Iter 17920, Minibatch Loss= 0.275802, Training Accuracy= 0.92188\n",
134 | "Iter 19200, Minibatch Loss= 0.300505, Training Accuracy= 0.89062\n",
135 | "Iter 20480, Minibatch Loss= 0.198208, Training Accuracy= 0.92188\n",
136 | "Iter 21760, Minibatch Loss= 0.444818, Training Accuracy= 0.83594\n",
137 | "Iter 23040, Minibatch Loss= 0.161764, Training Accuracy= 0.96094\n",
138 | "Iter 24320, Minibatch Loss= 0.367215, Training Accuracy= 0.83594\n",
139 | "Iter 25600, Minibatch Loss= 0.388027, Training Accuracy= 0.87500\n",
140 | "Iter 26880, Minibatch Loss= 0.242736, Training Accuracy= 0.91406\n",
141 | "Iter 28160, Minibatch Loss= 0.239943, Training Accuracy= 0.92188\n",
142 | "Iter 29440, Minibatch Loss= 0.278329, Training Accuracy= 0.92969\n",
143 | "Iter 30720, Minibatch Loss= 0.284431, Training Accuracy= 0.89844\n",
144 | "Iter 32000, Minibatch Loss= 0.197911, Training Accuracy= 0.93750\n",
145 | "Iter 33280, Minibatch Loss= 0.255737, Training Accuracy= 0.91406\n",
146 | "Iter 34560, Minibatch Loss= 0.221531, Training Accuracy= 0.92969\n",
147 | "Iter 35840, Minibatch Loss= 0.195205, Training Accuracy= 0.92969\n",
148 | "Iter 37120, Minibatch Loss= 0.342426, Training Accuracy= 0.89844\n",
149 | "Iter 38400, Minibatch Loss= 0.112141, Training Accuracy= 0.97656\n",
150 | "Iter 39680, Minibatch Loss= 0.142680, Training Accuracy= 0.95312\n",
151 | "Iter 40960, Minibatch Loss= 0.368973, Training Accuracy= 0.85938\n",
152 | "Iter 42240, Minibatch Loss= 0.113069, Training Accuracy= 0.97656\n",
153 | "Iter 43520, Minibatch Loss= 0.134389, Training Accuracy= 0.96094\n",
154 | "Iter 44800, Minibatch Loss= 0.156187, Training Accuracy= 0.95312\n",
155 | "Iter 46080, Minibatch Loss= 0.112163, Training Accuracy= 0.96094\n",
156 | "Iter 47360, Minibatch Loss= 0.263477, Training Accuracy= 0.90625\n",
157 | "Iter 48640, Minibatch Loss= 0.274484, Training Accuracy= 0.89844\n",
158 | "Iter 49920, Minibatch Loss= 0.258205, Training Accuracy= 0.90625\n",
159 | "Iter 51200, Minibatch Loss= 0.132311, Training Accuracy= 0.95312\n",
160 | "Iter 52480, Minibatch Loss= 0.193309, Training Accuracy= 0.94531\n",
161 | "Iter 53760, Minibatch Loss= 0.056192, Training Accuracy= 0.98438\n",
162 | "Iter 55040, Minibatch Loss= 0.123685, Training Accuracy= 0.94531\n",
163 | "Iter 56320, Minibatch Loss= 0.202632, Training Accuracy= 0.94531\n",
164 | "Iter 57600, Minibatch Loss= 0.165385, Training Accuracy= 0.96875\n",
165 | "Iter 58880, Minibatch Loss= 0.174151, Training Accuracy= 0.93750\n",
166 | "Iter 60160, Minibatch Loss= 0.240011, Training Accuracy= 0.89062\n",
167 | "Iter 61440, Minibatch Loss= 0.257696, Training Accuracy= 0.89844\n",
168 | "Iter 62720, Minibatch Loss= 0.287447, Training Accuracy= 0.92969\n",
169 | "Iter 64000, Minibatch Loss= 0.175233, Training Accuracy= 0.93750\n",
170 | "Iter 65280, Minibatch Loss= 0.078478, Training Accuracy= 0.97656\n",
171 | "Iter 66560, Minibatch Loss= 0.087357, Training Accuracy= 0.96875\n",
172 | "Iter 67840, Minibatch Loss= 0.136066, Training Accuracy= 0.95312\n",
173 | "Iter 69120, Minibatch Loss= 0.076882, Training Accuracy= 0.97656\n",
174 | "Iter 70400, Minibatch Loss= 0.147048, Training Accuracy= 0.94531\n",
175 | "Iter 71680, Minibatch Loss= 0.113056, Training Accuracy= 0.96094\n",
176 | "Iter 72960, Minibatch Loss= 0.092824, Training Accuracy= 0.98438\n",
177 | "Iter 74240, Minibatch Loss= 0.108782, Training Accuracy= 0.97656\n",
178 | "Iter 75520, Minibatch Loss= 0.123648, Training Accuracy= 0.96875\n",
179 | "Iter 76800, Minibatch Loss= 0.140416, Training Accuracy= 0.94531\n",
180 | "Iter 78080, Minibatch Loss= 0.094538, Training Accuracy= 0.96875\n",
181 | "Iter 79360, Minibatch Loss= 0.063751, Training Accuracy= 0.97656\n",
182 | "Iter 80640, Minibatch Loss= 0.159711, Training Accuracy= 0.96875\n",
183 | "Iter 81920, Minibatch Loss= 0.080421, Training Accuracy= 0.97656\n",
184 | "Iter 83200, Minibatch Loss= 0.071442, Training Accuracy= 0.97656\n",
185 | "Iter 84480, Minibatch Loss= 0.067796, Training Accuracy= 0.96875\n",
186 | "Iter 85760, Minibatch Loss= 0.073391, Training Accuracy= 0.97656\n",
187 | "Iter 87040, Minibatch Loss= 0.188386, Training Accuracy= 0.94531\n",
188 | "Iter 88320, Minibatch Loss= 0.194683, Training Accuracy= 0.92969\n",
189 | "Iter 89600, Minibatch Loss= 0.253914, Training Accuracy= 0.92188\n",
190 | "Iter 90880, Minibatch Loss= 0.178000, Training Accuracy= 0.93750\n",
191 | "Iter 92160, Minibatch Loss= 0.100396, Training Accuracy= 0.96094\n",
192 | "Iter 93440, Minibatch Loss= 0.081216, Training Accuracy= 0.96875\n",
193 | "Iter 94720, Minibatch Loss= 0.100157, Training Accuracy= 0.96094\n",
194 | "Iter 96000, Minibatch Loss= 0.094466, Training Accuracy= 0.94531\n",
195 | "Iter 97280, Minibatch Loss= 0.090565, Training Accuracy= 0.98438\n",
196 | "Iter 98560, Minibatch Loss= 0.096530, Training Accuracy= 0.96875\n",
197 | "Iter 99840, Minibatch Loss= 0.101170, Training Accuracy= 0.97656\n",
198 | "Optimization Finished!\n",
199 | "Testing Accuracy: 0.9613\n"
200 | ]
201 | }
202 | ],
203 | "source": [
204 | "# Launch the graph\n",
205 | "with tf.Session() as sess:\n",
206 | " sess.run(init)\n",
207 | " step = 1\n",
208 | " # Keep training until reach max iterations\n",
209 | " while step * batch_size < training_iters:\n",
210 | " batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
211 | " # Reshape data to get 28 seq of 28 elements\n",
212 | " batch_x = batch_x.reshape((batch_size, n_steps, n_input))\n",
213 | " # Run optimization op (backprop)\n",
214 | " sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})\n",
215 | " if step % display_step == 0:\n",
216 | " # Calculate batch accuracy\n",
217 | " acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})\n",
218 | " # Calculate batch loss\n",
219 | " loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})\n",
220 | " print(\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n",
221 | " \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n",
222 | " \"{:.5f}\".format(acc))\n",
223 | " step += 1\n",
224 | " print(\"Optimization Finished!\")\n",
225 | " \n",
226 | " test_data = mnist.test.images.reshape((-1, n_steps, n_input))\n",
227 | " test_label = mnist.test.labels\n",
228 | " print(\"Testing Accuracy:\", \\\n",
229 | " sess.run(accuracy, feed_dict={x: test_data, y: test_label}))"
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": null,
235 | "metadata": {
236 | "collapsed": true
237 | },
238 | "outputs": [],
239 | "source": []
240 | }
241 | ],
242 | "metadata": {
243 | "kernelspec": {
244 | "display_name": "Tensorflow 3",
245 | "language": "python",
246 | "name": "tensorflow"
247 | },
248 | "language_info": {
249 | "codemirror_mode": {
250 | "name": "ipython",
251 | "version": 3
252 | },
253 | "file_extension": ".py",
254 | "mimetype": "text/x-python",
255 | "name": "python",
256 | "nbconvert_exporter": "python",
257 | "pygments_lexer": "ipython3",
258 | "version": "3.5.3"
259 | }
260 | },
261 | "nbformat": 4,
262 | "nbformat_minor": 2
263 | }
264 |
--------------------------------------------------------------------------------
/Lab10_Recurrent_NeuralNetwork/LabXX_LSTMCell.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "import tensorflow as tf\n",
12 | "from tensorflow.contrib import rnn\n",
13 | "import numpy as np\n",
14 | "\n",
15 | "# Import MNIST data\n",
16 | "from tensorflow.examples.tutorials.mnist import input_data\n",
17 | "mnist = input_data.read_data_sets(\"../data\", one_hot=True)"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": null,
23 | "metadata": {
24 | "collapsed": true
25 | },
26 | "outputs": [],
27 | "source": [
28 | "# Parameters\n",
29 | "learning_rate = 0.001\n",
30 | "training_iters = 100000\n",
31 | "batch_size = 128\n",
32 | "display_step = 10\n",
33 | "\n",
34 | "# Network Parameters\n",
35 | "n_input = 28 # MNIST data input (img shape: 28*28)\n",
36 | "n_steps = 28 # timesteps\n",
37 | "n_hidden = 128 # hidden layer num of features\n",
38 | "n_classes = 10 # MNIST total classes (0-9 digits)\n",
39 | "\n",
40 | "# tf Graph input\n",
41 | "x = tf.placeholder(\"float\", [None, n_steps, n_input])\n",
42 | "y = tf.placeholder(\"float\", [None, n_classes])\n",
43 | "\n",
44 | "# Define weights\n",
45 | "weights = {\n",
46 | " 'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))\n",
47 | "}\n",
48 | "biases = {\n",
49 | " 'out': tf.Variable(tf.random_normal([n_classes]))\n",
50 | "}"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {
57 | "collapsed": true
58 | },
59 | "outputs": [],
60 | "source": [
61 | "def RNN(x, weights, biases):\n",
62 | "\n",
63 | "# configuration\n",
64 | "# O * W + b -> 10 labels for each image, O[? 28], W[28 10], B[10]\n",
65 | "# ^ (O: output 28 vec from 28 vec input)\n",
66 | "# |\n",
67 | "# +-+ +-+ +--+\n",
68 | "# |1|->|2|-> ... |28| time_step_size = 28\n",
69 | "# +-+ +-+ +--+\n",
70 | "# ^ ^ ... ^\n",
71 | "# | | |\n",
72 | "# [28] [28] ... [28]\n",
73 | "# [28] [28] ... [28]\n",
74 | "# [28] [28] ... [28]\n",
75 | " \n",
76 | " x = tf.unstack(x, n_steps, 1)\n",
77 | " \n",
78 | " cell = rnn.BasicLSTMCell(n_hidden,forget_bias=0.1)\n",
79 | " \n",
80 | " outputs, states = rnn.static_rnn(cell, x, dtype=tf.float32)\n",
81 | " # Linear activation, using rnn inner loop last output\n",
82 | " return tf.matmul(outputs[-1], weights['out']) + biases['out']\n",
83 | "\n",
84 | "pred = RNN(x, weights, biases)\n",
85 | "\n",
86 | "# Define loss and optimizer\n",
87 | "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n",
88 | "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n",
89 | "\n",
90 | "# Evaluate model\n",
91 | "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n",
92 | "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
93 | "\n",
94 | "# Initializing the variables\n",
95 | "init = tf.global_variables_initializer()"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {
102 | "collapsed": false
103 | },
104 | "outputs": [],
105 | "source": [
106 | "# Launch the graph\n",
107 | "with tf.Session() as sess:\n",
108 | " sess.run(init)\n",
109 | " step = 1\n",
110 | " # Keep training until reach max iterations\n",
111 | " while step * batch_size < training_iters:\n",
112 | " batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
113 | " # Reshape data to get 28 seq of 28 elements\n",
114 | " batch_x = batch_x.reshape((batch_size, n_steps, n_input))\n",
115 | " # Run optimization op (backprop)\n",
116 | " sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})\n",
117 | " if step % display_step == 0:\n",
118 | " # Calculate batch accuracy\n",
119 | " acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})\n",
120 | " # Calculate batch loss\n",
121 | " loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})\n",
122 | " print(\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n",
123 | " \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n",
124 | " \"{:.5f}\".format(acc))\n",
125 | " step += 1\n",
126 | " print(\"Optimization Finished!\")\n",
127 | " \n",
128 | " test_data = mnist.test.images.reshape((-1, n_steps, n_input))\n",
129 | " test_label = mnist.test.labels\n",
130 | " print(\"Testing Accuracy:\", \\\n",
131 | " sess.run(accuracy, feed_dict={x: test_data, y: test_label}))"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "metadata": {
138 | "collapsed": true
139 | },
140 | "outputs": [],
141 | "source": []
142 | }
143 | ],
144 | "metadata": {
145 | "kernelspec": {
146 | "display_name": "Tensorflow 3",
147 | "language": "python",
148 | "name": "tensorflow"
149 | },
150 | "language_info": {
151 | "codemirror_mode": {
152 | "name": "ipython",
153 | "version": 3
154 | },
155 | "file_extension": ".py",
156 | "mimetype": "text/x-python",
157 | "name": "python",
158 | "nbconvert_exporter": "python",
159 | "pygments_lexer": "ipython3",
160 | "version": "3.5.3"
161 | }
162 | },
163 | "nbformat": 4,
164 | "nbformat_minor": 2
165 | }
166 |
--------------------------------------------------------------------------------
/Lab10_Recurrent_NeuralNetwork/Tumbon.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from __future__ import absolute_import, division, print_function\n",
12 | "\n",
13 | "import os\n",
14 | "from six import moves\n",
15 | "import ssl\n",
16 | "\n",
17 | "import tflearn\n",
18 | "from tflearn.data_utils import *"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {
25 | "collapsed": false
26 | },
27 | "outputs": [],
28 | "source": [
29 | "path = \"US_Cities.txt\"\n",
30 | "maxlen = 20\n",
31 | "string_utf8 = open(path, \"r\",encoding='utf-8').read()\n",
32 | "X, Y, char_idx = string_to_semi_redundant_sequences(string_utf8, seq_maxlen=maxlen, redun_step=3)"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {
39 | "collapsed": false
40 | },
41 | "outputs": [],
42 | "source": [
43 | "print(X[0])\n",
44 | "print(Y[0])\n",
45 | "print(char_idx)"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {
52 | "collapsed": false
53 | },
54 | "outputs": [],
55 | "source": [
56 | "g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])\n",
57 | "g = tflearn.lstm(g, 512, return_seq=True)\n",
58 | "g = tflearn.dropout(g, 0.5)\n",
59 | "g = tflearn.lstm(g, 512)\n",
60 | "g = tflearn.dropout(g, 0.5)\n",
61 | "g = tflearn.fully_connected(g, len(char_idx), activation='softmax')\n",
62 | "g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',\n",
63 | " learning_rate=0.001)\n",
64 | "\n",
65 | "m = tflearn.SequenceGenerator(g, dictionary=char_idx,\n",
66 | " seq_maxlen=maxlen,\n",
67 | " clip_gradients=5.0,\n",
68 | " checkpoint_path='model_us_cities')\n"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": null,
74 | "metadata": {
75 | "collapsed": false
76 | },
77 | "outputs": [],
78 | "source": [
79 | "seed = random_sequence_from_string(string_utf8, maxlen)\n",
80 | "m.fit(X, Y, validation_set=0.1, batch_size=128,n_epoch=1, run_id='us_cities')\n",
81 | "print(\"-- TESTING...\")\n",
82 | "print(\"-- Test with temperature of 1.2 --\")\n",
83 | "print(m.generate(30, temperature=1.2, seq_seed=seed).encode('utf-8'))\n",
84 | "print(\"-- Test with temperature of 1.0 --\")\n",
85 | "print(m.generate(30, temperature=1.0, seq_seed=seed).encode('utf-8'))\n",
86 | "print(\"-- Test with temperature of 0.5 --\")\n",
87 | "print(m.generate(30, temperature=0.5, seq_seed=seed).encode('utf-8'))"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "metadata": {
94 | "collapsed": false
95 | },
96 | "outputs": [],
97 | "source": [
98 | "print(m.generate(100, temperature=1.0, seq_seed=seed))"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {
105 | "collapsed": true
106 | },
107 | "outputs": [],
108 | "source": []
109 | }
110 | ],
111 | "metadata": {
112 | "kernelspec": {
113 | "display_name": "Tensorflow 3",
114 | "language": "python",
115 | "name": "tensorflow"
116 | },
117 | "language_info": {
118 | "codemirror_mode": {
119 | "name": "ipython",
120 | "version": 3
121 | },
122 | "file_extension": ".py",
123 | "mimetype": "text/x-python",
124 | "name": "python",
125 | "nbconvert_exporter": "python",
126 | "pygments_lexer": "ipython3",
127 | "version": "3.5.3"
128 | }
129 | },
130 | "nbformat": 4,
131 | "nbformat_minor": 2
132 | }
133 |
--------------------------------------------------------------------------------
/Lab10_Recurrent_NeuralNetwork/images/rnn.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lab10_Recurrent_NeuralNetwork/images/rnn.jpg
--------------------------------------------------------------------------------
/Lacture_01_Understand_Machine_Learning/01_Machine_Learning.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/Lacture_01_Understand_Machine_Learning/01_Machine_Learning.pdf
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # BasicMachineLearning
2 | Material for Basic Machine Learning
3 |
4 | [](http://creativecommons.org/licenses/by-nc-sa/4.0/)
5 |
--------------------------------------------------------------------------------
/data/t10k-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/data/t10k-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/data/t10k-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/data/t10k-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/data/train-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/data/train-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/data/train-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/comdet/BasicMachineLearning/e08b558b8c55960ec9e51f6be813a4e6bfc63842/data/train-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------