├── .gitignore
├── .idea
│   ├── deployment.xml
│   ├── other.xml
│   └── vcs.xml
├── 0.1.Fundamental_Neural_Network
│   ├── ex1
│   │   ├── .ipynb_checkpoints
│   │   │   └── ex1-checkpoint.ipynb
│   │   ├── data
│   │   │   ├── ex1data1.txt
│   │   │   └── ex1data2.txt
│   │   ├── ex1.pdf
│   │   └── ex1_jemin.ipynb
│   ├── ex2
│   │   ├── .ipynb_checkpoints
│   │   │   └── ex2-checkpoint.ipynb
│   │   ├── data
│   │   │   ├── ex2data1.txt
│   │   │   └── ex2data2.txt
│   │   ├── ex2.pdf
│   │   └── ex2_jemin.ipynb
│   ├── ex3
│   │   ├── .ipynb_checkpoints
│   │   │   └── ex3-checkpoint.ipynb
│   │   ├── data
│   │   │   ├── ex3data1.mat
│   │   │   └── ex3weights.mat
│   │   ├── ex3.pdf
│   │   └── ex3_jemin.ipynb
│   └── ex4
│       ├── data
│       │   ├── ex4data1.mat
│       │   └── ex4weights.mat
│       ├── ex4.pdf
│       ├── ex4_jemin.ipynb
│       └── example_fmin_cg.ipynb
├── 0.2.Basic
│   ├── Basic Tutorial.ipynb
│   ├── Basic_usageOfConstant.py
│   ├── Basic_usageOfPlaceholder.py
│   ├── Basic_usageOfVariables.py
│   ├── __init__.py
│   ├── basic.py
│   ├── hellowTensorFlow.py
│   └── log_simple_stats
│       ├── events.out.tfevents.1485839642.jemin-desktop
│       ├── events.out.tfevents.1485839723.jemin-desktop
│       ├── events.out.tfevents.1485839766.jemin-desktop
│       ├── events.out.tfevents.1485839778.jemin-desktop
│       ├── events.out.tfevents.1485839840.jemin-desktop
│       └── events.out.tfevents.1485840189.jemin-desktop
├── 1.Linear Regression
│   ├── LinearRegression.py
│   ├── __init__.py
│   ├── multiFeaturesTrain.txt
│   └── multiVariableLinearRegression.py
├── 10.Transfer Learning
│   └── Transfer Learning.ipynb
├── 2.Logistic Classification
│   ├── LogisticRegressionBasic.py
│   ├── SoftmaxClassification.py
│   ├── __init__.py
│   ├── logisticTrain.txt
│   └── softmaxTrain.txt
├── 3.XOR
│   ├── XORtrain.txt
│   ├── XORwithLogisticRegression.py
│   ├── XORwithNN.py
│   └── __init__.py
├── 4.MNIST
│   ├── DNN_DropoutForMNIST.py
│   ├── DNNforMNIST.py
│   ├── MNIST_DATA
│   │   ├── t10k-images-idx3-ubyte.gz
│   │   ├── t10k-labels-idx1-ubyte.gz
│   │   ├── train-images-idx3-ubyte.gz
│   │   └── train-labels-idx1-ubyte.gz
│   ├── MNIST_Tutorial_DNN.ipynb
│   ├── SoftmaxClassificationMNIST.py
│   ├── __init__.py
│   ├── googleLayers.py
│   └── input_data.py
├── 5.CNN
│   ├── CNNforMNIST-tensorboard.ipynb
│   ├── CNNforMNIST.ipynb
│   ├── CNNforMNIST.py
│   ├── CNNforMNIST2.py
│   ├── Cnn_layer.png
│   ├── MNIST_DATA
│   │   ├── t10k-images-idx3-ubyte.gz
│   │   ├── t10k-labels-idx1-ubyte.gz
│   │   ├── train-images-idx3-ubyte.gz
│   │   └── train-labels-idx1-ubyte.gz
│   ├── big_cnn.png
│   ├── googleLayers.py
│   ├── input_data.py
│   ├── mycnn.JPG
│   └── tf_board.py
├── 6.Early Stop and Index Shuffling
│   └── EarlyStop.py
├── 7.TensorBoard
│   ├── MNIST_DATA
│   │   └── data
│   │       ├── t10k-images-idx3-ubyte.gz
│   │       ├── t10k-labels-idx1-ubyte.gz
│   │       ├── train-images-idx3-ubyte.gz
│   │       └── train-labels-idx1-ubyte.gz
│   ├── TensorBoard.ipynb
│   ├── TensorBoard.py
│   ├── XORtrain.txt
│   ├── __init__.py
│   ├── labels_1024.tsv
│   ├── logs
│   │   └── xor_logs
│   │       ├── events.out.tfevents.1486346805.jemin-desktop
│   │       ├── events.out.tfevents.1486346813.jemin-desktop
│   │       └── events.out.tfevents.1486346816.jemin-desktop
│   └── mnist.py
├── 8.Save and Restore
│   └── Save and Restore.ipynb
├── 9.RNN
│   ├── RealMarketPriceDataPT.csv
│   ├── data-02-stock_daily.csv
│   ├── gp-for-sine-wave.ipynb
│   ├── gp-for-sine-wave.py
│   ├── lab-12-1-hello-rnn.py
│   ├── lab-12-2-char-seq-rnn.py
│   ├── lab-12-3-char-seq-softmax-only.py
│   ├── lab-12-4-rnn_long_char.py
│   ├── lab-12-5-rnn_stock_prediction.py
│   ├── lstm-for-epf.py
│   ├── lstm-for-sine-wave.ipynb
│   ├── lstm-for-sine-wave.py
│   ├── lstm_predictor.py
│   └── mnist-rnn.ipynb
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *,cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # dotenv
80 | .env
81 |
82 | # virtualenv
83 | .venv/
84 | venv/
85 | ENV/
86 |
87 | # Spyder project settings
88 | .spyderproject
89 |
90 | # Rope project settings
91 | .ropeproject
92 |
93 |
--------------------------------------------------------------------------------
/.idea/deployment.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/other.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex1/data/ex1data1.txt:
--------------------------------------------------------------------------------
1 | 6.1101,17.592
2 | 5.5277,9.1302
3 | 8.5186,13.662
4 | 7.0032,11.854
5 | 5.8598,6.8233
6 | 8.3829,11.886
7 | 7.4764,4.3483
8 | 8.5781,12
9 | 6.4862,6.5987
10 | 5.0546,3.8166
11 | 5.7107,3.2522
12 | 14.164,15.505
13 | 5.734,3.1551
14 | 8.4084,7.2258
15 | 5.6407,0.71618
16 | 5.3794,3.5129
17 | 6.3654,5.3048
18 | 5.1301,0.56077
19 | 6.4296,3.6518
20 | 7.0708,5.3893
21 | 6.1891,3.1386
22 | 20.27,21.767
23 | 5.4901,4.263
24 | 6.3261,5.1875
25 | 5.5649,3.0825
26 | 18.945,22.638
27 | 12.828,13.501
28 | 10.957,7.0467
29 | 13.176,14.692
30 | 22.203,24.147
31 | 5.2524,-1.22
32 | 6.5894,5.9966
33 | 9.2482,12.134
34 | 5.8918,1.8495
35 | 8.2111,6.5426
36 | 7.9334,4.5623
37 | 8.0959,4.1164
38 | 5.6063,3.3928
39 | 12.836,10.117
40 | 6.3534,5.4974
41 | 5.4069,0.55657
42 | 6.8825,3.9115
43 | 11.708,5.3854
44 | 5.7737,2.4406
45 | 7.8247,6.7318
46 | 7.0931,1.0463
47 | 5.0702,5.1337
48 | 5.8014,1.844
49 | 11.7,8.0043
50 | 5.5416,1.0179
51 | 7.5402,6.7504
52 | 5.3077,1.8396
53 | 7.4239,4.2885
54 | 7.6031,4.9981
55 | 6.3328,1.4233
56 | 6.3589,-1.4211
57 | 6.2742,2.4756
58 | 5.6397,4.6042
59 | 9.3102,3.9624
60 | 9.4536,5.4141
61 | 8.8254,5.1694
62 | 5.1793,-0.74279
63 | 21.279,17.929
64 | 14.908,12.054
65 | 18.959,17.054
66 | 7.2182,4.8852
67 | 8.2951,5.7442
68 | 10.236,7.7754
69 | 5.4994,1.0173
70 | 20.341,20.992
71 | 10.136,6.6799
72 | 7.3345,4.0259
73 | 6.0062,1.2784
74 | 7.2259,3.3411
75 | 5.0269,-2.6807
76 | 6.5479,0.29678
77 | 7.5386,3.8845
78 | 5.0365,5.7014
79 | 10.274,6.7526
80 | 5.1077,2.0576
81 | 5.7292,0.47953
82 | 5.1884,0.20421
83 | 6.3557,0.67861
84 | 9.7687,7.5435
85 | 6.5159,5.3436
86 | 8.5172,4.2415
87 | 9.1802,6.7981
88 | 6.002,0.92695
89 | 5.5204,0.152
90 | 5.0594,2.8214
91 | 5.7077,1.8451
92 | 7.6366,4.2959
93 | 5.8707,7.2029
94 | 5.3054,1.9869
95 | 8.2934,0.14454
96 | 13.394,9.0551
97 | 5.4369,0.61705
98 |
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex1/data/ex1data2.txt:
--------------------------------------------------------------------------------
1 | 2104,3,399900
2 | 1600,3,329900
3 | 2400,3,369000
4 | 1416,2,232000
5 | 3000,4,539900
6 | 1985,4,299900
7 | 1534,3,314900
8 | 1427,3,198999
9 | 1380,3,212000
10 | 1494,3,242500
11 | 1940,4,239999
12 | 2000,3,347000
13 | 1890,3,329999
14 | 4478,5,699900
15 | 1268,3,259900
16 | 2300,4,449900
17 | 1320,2,299900
18 | 1236,3,199900
19 | 2609,4,499998
20 | 3031,4,599000
21 | 1767,3,252900
22 | 1888,2,255000
23 | 1604,3,242900
24 | 1962,4,259900
25 | 3890,3,573900
26 | 1100,3,249900
27 | 1458,3,464500
28 | 2526,3,469000
29 | 2200,3,475000
30 | 2637,3,299900
31 | 1839,2,349900
32 | 1000,1,169900
33 | 2040,4,314900
34 | 3137,3,579900
35 | 1811,4,285900
36 | 1437,3,249900
37 | 1239,3,229900
38 | 2132,4,345000
39 | 4215,4,549000
40 | 2162,4,287000
41 | 1664,2,368500
42 | 2238,3,329900
43 | 2567,4,314000
44 | 1200,3,299000
45 | 852,2,179900
46 | 1852,4,299900
47 | 1203,3,239500
48 |
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex1/ex1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.1.Fundamental_Neural_Network/ex1/ex1.pdf
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex2/data/ex2data1.txt:
--------------------------------------------------------------------------------
1 | 34.62365962451697,78.0246928153624,0
2 | 30.28671076822607,43.89499752400101,0
3 | 35.84740876993872,72.90219802708364,0
4 | 60.18259938620976,86.30855209546826,1
5 | 79.0327360507101,75.3443764369103,1
6 | 45.08327747668339,56.3163717815305,0
7 | 61.10666453684766,96.51142588489624,1
8 | 75.02474556738889,46.55401354116538,1
9 | 76.09878670226257,87.42056971926803,1
10 | 84.43281996120035,43.53339331072109,1
11 | 95.86155507093572,38.22527805795094,0
12 | 75.01365838958247,30.60326323428011,0
13 | 82.30705337399482,76.48196330235604,1
14 | 69.36458875970939,97.71869196188608,1
15 | 39.53833914367223,76.03681085115882,0
16 | 53.9710521485623,89.20735013750205,1
17 | 69.07014406283025,52.74046973016765,1
18 | 67.94685547711617,46.67857410673128,0
19 | 70.66150955499435,92.92713789364831,1
20 | 76.97878372747498,47.57596364975532,1
21 | 67.37202754570876,42.83843832029179,0
22 | 89.67677575072079,65.79936592745237,1
23 | 50.534788289883,48.85581152764205,0
24 | 34.21206097786789,44.20952859866288,0
25 | 77.9240914545704,68.9723599933059,1
26 | 62.27101367004632,69.95445795447587,1
27 | 80.1901807509566,44.82162893218353,1
28 | 93.114388797442,38.80067033713209,0
29 | 61.83020602312595,50.25610789244621,0
30 | 38.78580379679423,64.99568095539578,0
31 | 61.379289447425,72.80788731317097,1
32 | 85.40451939411645,57.05198397627122,1
33 | 52.10797973193984,63.12762376881715,0
34 | 52.04540476831827,69.43286012045222,1
35 | 40.23689373545111,71.16774802184875,0
36 | 54.63510555424817,52.21388588061123,0
37 | 33.91550010906887,98.86943574220611,0
38 | 64.17698887494485,80.90806058670817,1
39 | 74.78925295941542,41.57341522824434,0
40 | 34.1836400264419,75.2377203360134,0
41 | 83.90239366249155,56.30804621605327,1
42 | 51.54772026906181,46.85629026349976,0
43 | 94.44336776917852,65.56892160559052,1
44 | 82.36875375713919,40.61825515970618,0
45 | 51.04775177128865,45.82270145776001,0
46 | 62.22267576120188,52.06099194836679,0
47 | 77.19303492601364,70.45820000180959,1
48 | 97.77159928000232,86.7278223300282,1
49 | 62.07306379667647,96.76882412413983,1
50 | 91.56497449807442,88.69629254546599,1
51 | 79.94481794066932,74.16311935043758,1
52 | 99.2725269292572,60.99903099844988,1
53 | 90.54671411399852,43.39060180650027,1
54 | 34.52451385320009,60.39634245837173,0
55 | 50.2864961189907,49.80453881323059,0
56 | 49.58667721632031,59.80895099453265,0
57 | 97.64563396007767,68.86157272420604,1
58 | 32.57720016809309,95.59854761387875,0
59 | 74.24869136721598,69.82457122657193,1
60 | 71.79646205863379,78.45356224515052,1
61 | 75.3956114656803,85.75993667331619,1
62 | 35.28611281526193,47.02051394723416,0
63 | 56.25381749711624,39.26147251058019,0
64 | 30.05882244669796,49.59297386723685,0
65 | 44.66826172480893,66.45008614558913,0
66 | 66.56089447242954,41.09209807936973,0
67 | 40.45755098375164,97.53518548909936,1
68 | 49.07256321908844,51.88321182073966,0
69 | 80.27957401466998,92.11606081344084,1
70 | 66.74671856944039,60.99139402740988,1
71 | 32.72283304060323,43.30717306430063,0
72 | 64.0393204150601,78.03168802018232,1
73 | 72.34649422579923,96.22759296761404,1
74 | 60.45788573918959,73.09499809758037,1
75 | 58.84095621726802,75.85844831279042,1
76 | 99.82785779692128,72.36925193383885,1
77 | 47.26426910848174,88.47586499559782,1
78 | 50.45815980285988,75.80985952982456,1
79 | 60.45555629271532,42.50840943572217,0
80 | 82.22666157785568,42.71987853716458,0
81 | 88.9138964166533,69.80378889835472,1
82 | 94.83450672430196,45.69430680250754,1
83 | 67.31925746917527,66.58935317747915,1
84 | 57.23870631569862,59.51428198012956,1
85 | 80.36675600171273,90.96014789746954,1
86 | 68.46852178591112,85.59430710452014,1
87 | 42.0754545384731,78.84478600148043,0
88 | 75.47770200533905,90.42453899753964,1
89 | 78.63542434898018,96.64742716885644,1
90 | 52.34800398794107,60.76950525602592,0
91 | 94.09433112516793,77.15910509073893,1
92 | 90.44855097096364,87.50879176484702,1
93 | 55.48216114069585,35.57070347228866,0
94 | 74.49269241843041,84.84513684930135,1
95 | 89.84580670720979,45.35828361091658,1
96 | 83.48916274498238,48.38028579728175,1
97 | 42.2617008099817,87.10385094025457,1
98 | 99.31500880510394,68.77540947206617,1
99 | 55.34001756003703,64.9319380069486,1
100 | 74.77589300092767,89.52981289513276,1
101 |
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex2/data/ex2data2.txt:
--------------------------------------------------------------------------------
1 | 0.051267,0.69956,1
2 | -0.092742,0.68494,1
3 | -0.21371,0.69225,1
4 | -0.375,0.50219,1
5 | -0.51325,0.46564,1
6 | -0.52477,0.2098,1
7 | -0.39804,0.034357,1
8 | -0.30588,-0.19225,1
9 | 0.016705,-0.40424,1
10 | 0.13191,-0.51389,1
11 | 0.38537,-0.56506,1
12 | 0.52938,-0.5212,1
13 | 0.63882,-0.24342,1
14 | 0.73675,-0.18494,1
15 | 0.54666,0.48757,1
16 | 0.322,0.5826,1
17 | 0.16647,0.53874,1
18 | -0.046659,0.81652,1
19 | -0.17339,0.69956,1
20 | -0.47869,0.63377,1
21 | -0.60541,0.59722,1
22 | -0.62846,0.33406,1
23 | -0.59389,0.005117,1
24 | -0.42108,-0.27266,1
25 | -0.11578,-0.39693,1
26 | 0.20104,-0.60161,1
27 | 0.46601,-0.53582,1
28 | 0.67339,-0.53582,1
29 | -0.13882,0.54605,1
30 | -0.29435,0.77997,1
31 | -0.26555,0.96272,1
32 | -0.16187,0.8019,1
33 | -0.17339,0.64839,1
34 | -0.28283,0.47295,1
35 | -0.36348,0.31213,1
36 | -0.30012,0.027047,1
37 | -0.23675,-0.21418,1
38 | -0.06394,-0.18494,1
39 | 0.062788,-0.16301,1
40 | 0.22984,-0.41155,1
41 | 0.2932,-0.2288,1
42 | 0.48329,-0.18494,1
43 | 0.64459,-0.14108,1
44 | 0.46025,0.012427,1
45 | 0.6273,0.15863,1
46 | 0.57546,0.26827,1
47 | 0.72523,0.44371,1
48 | 0.22408,0.52412,1
49 | 0.44297,0.67032,1
50 | 0.322,0.69225,1
51 | 0.13767,0.57529,1
52 | -0.0063364,0.39985,1
53 | -0.092742,0.55336,1
54 | -0.20795,0.35599,1
55 | -0.20795,0.17325,1
56 | -0.43836,0.21711,1
57 | -0.21947,-0.016813,1
58 | -0.13882,-0.27266,1
59 | 0.18376,0.93348,0
60 | 0.22408,0.77997,0
61 | 0.29896,0.61915,0
62 | 0.50634,0.75804,0
63 | 0.61578,0.7288,0
64 | 0.60426,0.59722,0
65 | 0.76555,0.50219,0
66 | 0.92684,0.3633,0
67 | 0.82316,0.27558,0
68 | 0.96141,0.085526,0
69 | 0.93836,0.012427,0
70 | 0.86348,-0.082602,0
71 | 0.89804,-0.20687,0
72 | 0.85196,-0.36769,0
73 | 0.82892,-0.5212,0
74 | 0.79435,-0.55775,0
75 | 0.59274,-0.7405,0
76 | 0.51786,-0.5943,0
77 | 0.46601,-0.41886,0
78 | 0.35081,-0.57968,0
79 | 0.28744,-0.76974,0
80 | 0.085829,-0.75512,0
81 | 0.14919,-0.57968,0
82 | -0.13306,-0.4481,0
83 | -0.40956,-0.41155,0
84 | -0.39228,-0.25804,0
85 | -0.74366,-0.25804,0
86 | -0.69758,0.041667,0
87 | -0.75518,0.2902,0
88 | -0.69758,0.68494,0
89 | -0.4038,0.70687,0
90 | -0.38076,0.91886,0
91 | -0.50749,0.90424,0
92 | -0.54781,0.70687,0
93 | 0.10311,0.77997,0
94 | 0.057028,0.91886,0
95 | -0.10426,0.99196,0
96 | -0.081221,1.1089,0
97 | 0.28744,1.087,0
98 | 0.39689,0.82383,0
99 | 0.63882,0.88962,0
100 | 0.82316,0.66301,0
101 | 0.67339,0.64108,0
102 | 1.0709,0.10015,0
103 | -0.046659,-0.57968,0
104 | -0.23675,-0.63816,0
105 | -0.15035,-0.36769,0
106 | -0.49021,-0.3019,0
107 | -0.46717,-0.13377,0
108 | -0.28859,-0.060673,0
109 | -0.61118,-0.067982,0
110 | -0.66302,-0.21418,0
111 | -0.59965,-0.41886,0
112 | -0.72638,-0.082602,0
113 | -0.83007,0.31213,0
114 | -0.72062,0.53874,0
115 | -0.59389,0.49488,0
116 | -0.48445,0.99927,0
117 | -0.0063364,0.99927,0
118 | 0.63265,-0.030612,0
119 |
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex2/ex2.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.1.Fundamental_Neural_Network/ex2/ex2.pdf
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex3/data/ex3data1.mat:
--------------------------------------------------------------------------------
1 | ../../ex3/data/ex3data1.mat
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex3/data/ex3weights.mat:
--------------------------------------------------------------------------------
1 | ../../ex3/data/ex3weights.mat
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex3/ex3.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.1.Fundamental_Neural_Network/ex3/ex3.pdf
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex4/data/ex4data1.mat:
--------------------------------------------------------------------------------
1 | ../../ex3/data/ex3data1.mat
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex4/data/ex4weights.mat:
--------------------------------------------------------------------------------
1 | ../../ex3/data/ex3weights.mat
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex4/ex4.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.1.Fundamental_Neural_Network/ex4/ex4.pdf
--------------------------------------------------------------------------------
/0.1.Fundamental_Neural_Network/ex4/example_fmin_cg.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from scipy import optimize\n",
12 | "import numpy as np"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": 26,
18 | "metadata": {
19 | "collapsed": true
20 | },
21 | "outputs": [],
22 | "source": [
23 | "args = (2, 3, 7, 8, 9, 10) # parameter values"
24 | ]
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "metadata": {},
29 | "source": [
30 | "$$f=au^{2}+buv+cv^{2}+du+ev+f$$"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {},
36 | "source": [
37 | "$$f=2u^{2}+3uv+7v^{2}+8u+9v+10$$"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": 32,
43 | "metadata": {
44 | "collapsed": false
45 | },
46 | "outputs": [],
47 | "source": [
48 | "def f(x, *args):\n",
49 | " u, v = x\n",
50 | " a, b, c, d, e, f = args\n",
51 | " return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f"
52 | ]
53 | },
54 | {
55 | "cell_type": "markdown",
56 | "metadata": {},
57 | "source": [
58 | "$$\\frac{\\partial f}{\\partial u}=2au+bv+d$$\n",
59 | "\n",
60 | "$$\\frac{\\partial f}{\\partial v}=bu+2c+e$$"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": 16,
66 | "metadata": {
67 | "collapsed": true
68 | },
69 | "outputs": [],
70 | "source": [
71 | "def gradf(x, *args):\n",
72 | " u, v = x\n",
73 | " a, b, c, d, e, f = args\n",
74 | " gu = 2*a*u + b*v + d # u-component of the gradient\n",
75 | " gv = b*u + 2*c*v + e # v-component of the gradient\n",
76 | " return np.asarray((gu, gv))"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": 17,
82 | "metadata": {
83 | "collapsed": false
84 | },
85 | "outputs": [],
86 | "source": [
87 | "x0 = np.asarray((0, 0)) # Initial guess."
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": 18,
93 | "metadata": {
94 | "collapsed": false
95 | },
96 | "outputs": [
97 | {
98 | "name": "stdout",
99 | "output_type": "stream",
100 | "text": [
101 | "Optimization terminated successfully.\n",
102 | " Current function value: 1.617021\n",
103 | " Iterations: 4\n",
104 | " Function evaluations: 8\n",
105 | " Gradient evaluations: 8\n"
106 | ]
107 | }
108 | ],
109 | "source": [
110 | "res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args,)"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": 19,
116 | "metadata": {
117 | "collapsed": false
118 | },
119 | "outputs": [
120 | {
121 | "data": {
122 | "text/plain": [
123 | "array([-1.80851064, -0.25531915])"
124 | ]
125 | },
126 | "execution_count": 19,
127 | "metadata": {},
128 | "output_type": "execute_result"
129 | }
130 | ],
131 | "source": [
132 | "# 최적화된 값\n",
133 | "res1"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": 31,
139 | "metadata": {
140 | "collapsed": false
141 | },
142 | "outputs": [
143 | {
144 | "data": {
145 | "text/plain": [
146 | "array([0, 0])"
147 | ]
148 | },
149 | "execution_count": 31,
150 | "metadata": {},
151 | "output_type": "execute_result"
152 | }
153 | ],
154 | "source": [
155 | "# 0으로 초기화된 값\n",
156 | "x0"
157 | ]
158 | },
159 | {
160 | "cell_type": "code",
161 | "execution_count": 30,
162 | "metadata": {
163 | "collapsed": false
164 | },
165 | "outputs": [
166 | {
167 | "data": {
168 | "text/plain": [
169 | "10"
170 | ]
171 | },
172 | "execution_count": 30,
173 | "metadata": {},
174 | "output_type": "execute_result"
175 | }
176 | ],
177 | "source": [
178 | "f(x0,*args)"
179 | ]
180 | },
181 | {
182 | "cell_type": "code",
183 | "execution_count": 29,
184 | "metadata": {
185 | "collapsed": false,
186 | "scrolled": false
187 | },
188 | "outputs": [
189 | {
190 | "data": {
191 | "text/plain": [
192 | "1.6170212765957448"
193 | ]
194 | },
195 | "execution_count": 29,
196 | "metadata": {},
197 | "output_type": "execute_result"
198 | }
199 | ],
200 | "source": [
201 | "f(res1,*args)"
202 | ]
203 | }
204 | ],
205 | "metadata": {
206 | "kernelspec": {
207 | "display_name": "Python 2",
208 | "language": "python",
209 | "name": "python2"
210 | },
211 | "language_info": {
212 | "codemirror_mode": {
213 | "name": "ipython",
214 | "version": 2
215 | },
216 | "file_extension": ".py",
217 | "mimetype": "text/x-python",
218 | "name": "python",
219 | "nbconvert_exporter": "python",
220 | "pygments_lexer": "ipython2",
221 | "version": "2.7.12"
222 | }
223 | },
224 | "nbformat": 4,
225 | "nbformat_minor": 2
226 | }
227 |
--------------------------------------------------------------------------------
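
As a cross-check on the `fmin_cg` run above, the minimizer of $f=2u^{2}+3uv+7v^{2}+8u+9v+10$ can also be found analytically: setting the gradient to zero gives a 2x2 linear system. The sketch below (NumPy only; the variable names are illustrative and not part of the notebook) should reproduce `res1` and the reported function value of about 1.617021.

import numpy as np

a, b, c, d, e, f = (2, 3, 7, 8, 9, 10)           # same parameter values as the notebook's args

# grad f = 0  <=>  [[2a, b], [b, 2c]] @ [u, v] = [-d, -e]
A = np.array([[2.0 * a, b],
              [b, 2.0 * c]])
u, v = np.linalg.solve(A, np.array([-d, -e], dtype=float))

print(u, v)                                      # approx. -1.80851064, -0.25531915
print(a*u**2 + b*u*v + c*v**2 + d*u + e*v + f)   # approx. 1.6170212765957448, the minimum value
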
/0.2.Basic/Basic Tutorial.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# TensorFlow Basic"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 3,
13 | "metadata": {
14 | "collapsed": true
15 | },
16 | "outputs": [],
17 | "source": [
18 | "import tensorflow as tf\n",
19 | "import numpy as np\n",
20 | "import matplotlib"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 6,
26 | "metadata": {
27 | "collapsed": true
28 | },
29 | "outputs": [],
30 | "source": [
31 | "hello = tf.constant('Hello, TensorFlow!')\n",
32 | "sess = tf.Session()"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": 7,
38 | "metadata": {
39 | "collapsed": false
40 | },
41 | "outputs": [
42 | {
43 | "data": {
44 | "text/plain": [
45 | ""
46 | ]
47 | },
48 | "execution_count": 7,
49 | "metadata": {},
50 | "output_type": "execute_result"
51 | }
52 | ],
53 | "source": [
54 | "sess"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": 4,
60 | "metadata": {
61 | "collapsed": false
62 | },
63 | "outputs": [
64 | {
65 | "name": "stdout",
66 | "output_type": "stream",
67 | "text": [
68 | "b'Hello, TensorFlow!'\n"
69 | ]
70 | }
71 | ],
72 | "source": [
73 | "print(sess.run(hello))"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": 5,
79 | "metadata": {
80 | "collapsed": false
81 | },
82 | "outputs": [
83 | {
84 | "data": {
85 | "text/plain": [
86 | ""
87 | ]
88 | },
89 | "execution_count": 5,
90 | "metadata": {},
91 | "output_type": "execute_result"
92 | }
93 | ],
94 | "source": [
95 | "hello"
96 | ]
97 | },
98 | {
99 | "cell_type": "markdown",
100 | "metadata": {},
101 | "source": [
102 | "## Constant\n",
103 | "- 상수 값을 의미 한다."
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": 8,
109 | "metadata": {
110 | "collapsed": false
111 | },
112 | "outputs": [
113 | {
114 | "data": {
115 | "text/plain": [
116 | ""
117 | ]
118 | },
119 | "execution_count": 8,
120 | "metadata": {},
121 | "output_type": "execute_result"
122 | }
123 | ],
124 | "source": [
125 | "# Create a Constant op that produces a 1x2 matrix. The op is\n",
126 | "# added as a node to the default graph.\n",
127 | "#\n",
128 | "# The value returned by the constructor represents the output\n",
129 | "# of the Constant op.\n",
130 | "matrix1 = tf.constant([[3., 3.]])\n",
131 | "\n",
132 | "# Create another Constant that produces a 2x1 matrix.\n",
133 | "matrix2 = tf.constant([[2.],[2.]])\n",
134 | "\n",
135 | "# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.\n",
136 | "# The returned value, 'product', represents the result of the matrix\n",
137 | "# multiplication.\n",
138 | "product = tf.matmul(matrix1, matrix2)\n",
139 | "product"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": 9,
145 | "metadata": {
146 | "collapsed": false
147 | },
148 | "outputs": [
149 | {
150 | "name": "stdout",
151 | "output_type": "stream",
152 | "text": [
153 | "[array([[ 12.]], dtype=float32)]\n"
154 | ]
155 | }
156 | ],
157 | "source": [
158 | "# Launch the default graph.\n",
159 | "sess = tf.Session()\n",
160 | "\n",
161 | "with tf.Session() as sess:\n",
162 | " result = sess.run([product])\n",
163 | " print(result)"
164 | ]
165 | },
166 | {
167 | "cell_type": "markdown",
168 | "metadata": {},
169 | "source": [
170 | "## Variables\n",
171 | "- 실행중 계속 변화하는 값을 의미한다."
172 | ]
173 | },
174 | {
175 | "cell_type": "code",
176 | "execution_count": 14,
177 | "metadata": {
178 | "collapsed": false,
179 | "scrolled": true
180 | },
181 | "outputs": [
182 | {
183 | "name": "stdout",
184 | "output_type": "stream",
185 | "text": [
186 | "0\n",
187 | "1\n",
188 | "2\n",
189 | "3\n"
190 | ]
191 | }
192 | ],
193 | "source": [
194 | "# Create a Variable, that will be initialized to the scalar value 0.\n",
195 | "state = tf.Variable(0, name=\"counter\")\n",
196 | "\n",
197 | "# Create an Op to add one to `state`.\n",
198 | "\n",
199 | "one = tf.constant(1)\n",
200 | "new_value = tf.add(state, one)\n",
201 | "update = tf.assign(state, new_value)\n",
202 | "\n",
203 | "# Variables must be initialized by running an `init` Op after having\n",
204 | "# launched the graph. We first have to add the `init` Op to the graph.\n",
205 | "init_op = tf.global_variables_initializer()\n",
206 | "\n",
207 | "# Launch the graph and run the ops.\n",
208 | "with tf.Session() as sess:\n",
209 | " # Run the 'init' op\n",
210 | " sess.run(init_op)\n",
211 | " # Print the initial value of 'state'\n",
212 | " print(sess.run(state))\n",
213 | " # Run the op that updates 'state' and print 'state'.\n",
214 | " for _ in range(3):\n",
215 | " sess.run(update)\n",
216 | " print(sess.run(state))"
217 | ]
218 | },
219 | {
220 | "cell_type": "markdown",
221 | "metadata": {},
222 | "source": [
223 | "\n",
224 | "## Placeholder\n",
225 | "\n",
226 | "- 추후에 값을 `feed_dict`를 이용해서 session을 실행할 때 전달 할 수 있다."
227 | ]
228 | },
229 | {
230 | "cell_type": "code",
231 | "execution_count": 10,
232 | "metadata": {
233 | "collapsed": true
234 | },
235 | "outputs": [],
236 | "source": [
237 | "a = tf.placeholder(tf.int16)\n",
238 | "b = tf.placeholder(tf.int16)"
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": 11,
244 | "metadata": {
245 | "collapsed": false
246 | },
247 | "outputs": [],
248 | "source": [
249 | "# Define some operations\n",
250 | "add = tf.add(a, b)\n",
251 | "mul = tf.mul(a,b)"
252 | ]
253 | },
254 | {
255 | "cell_type": "code",
256 | "execution_count": 12,
257 | "metadata": {
258 | "collapsed": false,
259 | "scrolled": true
260 | },
261 | "outputs": [
262 | {
263 | "name": "stdout",
264 | "output_type": "stream",
265 | "text": [
266 | "Addition with variables: 5\n",
267 | "Multiplication with variables: 25\n"
268 | ]
269 | }
270 | ],
271 | "source": [
272 | "with tf.Session() as sess:\n",
273 | " print (\"Addition with variables: %i\" % sess.run(add, feed_dict={a:2, b:3}))\n",
274 | " print (\"Multiplication with variables: %d\" % sess.run(mul, feed_dict={a:5., b:5.}))"
275 | ]
276 | },
277 | {
278 | "cell_type": "markdown",
279 | "metadata": {},
280 | "source": [
281 | "## Simple Neuran"
282 | ]
283 | },
284 | {
285 | "cell_type": "markdown",
286 | "metadata": {},
287 | "source": [
288 | "구현할 뉴련은 activiation function도 없고 bias도 없는 가장 간단한 형태이다.\n",
289 | "$$w \\times x = 0$$\n",
290 | "위 식을 최적화하는 w를 구하게 된다."
291 | ]
292 | },
293 | {
294 | "cell_type": "markdown",
295 | "metadata": {},
296 | "source": [
297 | "참이되는 w의 값은 0이다.\n",
298 | "초기에 w를 0.8로 초기화 하고 정의된 $cost$함수에 대해서 `gradient descent algorithm`을 수행하면 아래와 같다."
299 | ]
300 | },
301 | {
302 | "cell_type": "markdown",
303 | "metadata": {},
304 | "source": [
305 | "$$cost=(\\hat{y}-y)^{2}$$"
306 | ]
307 | },
308 | {
309 | "cell_type": "markdown",
310 | "metadata": {},
311 | "source": [
312 | "$$\\frac{\\partial cost}{\\partial w} = 2 \\times x$$"
313 | ]
314 | },
315 | {
316 | "cell_type": "markdown",
317 | "metadata": {},
318 | "source": [
319 | "**Gradient Descent**\n",
320 | "$$w = w - \\alpha \\frac{\\partial cost}{\\partial w}$$"
321 | ]
322 | },
323 | {
324 | "cell_type": "code",
325 | "execution_count": 13,
326 | "metadata": {
327 | "collapsed": false
328 | },
329 | "outputs": [
330 | {
331 | "name": "stdout",
332 | "output_type": "stream",
333 | "text": [
334 | "epoch 0, output: 0.800000011920929\n",
335 | "epoch 10, output: 0.47898948192596436\n",
336 | "epoch 20, output: 0.2867887020111084\n",
337 | "epoch 30, output: 0.17171096801757812\n",
338 | "epoch 40, output: 0.10280970484018326\n",
339 | "epoch 50, output: 0.06155596673488617\n",
340 | "epoch 60, output: 0.03685583174228668\n",
341 | "epoch 70, output: 0.02206694707274437\n",
342 | "epoch 80, output: 0.013212298974394798\n",
343 | "epoch 90, output: 0.007910690270364285\n"
344 | ]
345 | }
346 | ],
347 | "source": [
348 | "x = tf.constant(1.0, name='input')\n",
349 | "w = tf.Variable(0.8, name='weight')\n",
350 | "\n",
351 | "y = tf.mul(w, x, name='output')\n",
352 | "\n",
353 | "y_ = tf.constant(0.0, name='correct_value')\n",
354 | "loss = tf.pow(y - y_, 2, name='loss')\n",
355 | "train_step = tf.train.GradientDescentOptimizer(0.025).minimize(loss)\n",
356 | "sess = tf.Session()\n",
357 | "init_op = tf.global_variables_initializer()\n",
358 | "\n",
359 | "# Launch the graph and run the ops.\n",
360 | "with tf.Session() as sess:\n",
361 | " # Run the 'init' op\n",
362 | " sess.run(init_op)\n",
363 | " for i in range(100):\n",
364 | " if i % 10 ==0:\n",
365 | " print(\"epoch {}, output: {}\".format(i, sess.run(y)))\n",
366 | " sess.run(train_step)"
367 | ]
368 | },
369 | {
370 | "cell_type": "code",
371 | "execution_count": null,
372 | "metadata": {
373 | "collapsed": true
374 | },
375 | "outputs": [],
376 | "source": []
377 | }
378 | ],
379 | "metadata": {
380 | "kernelspec": {
381 | "display_name": "Python 3",
382 | "language": "python",
383 | "name": "python3"
384 | },
385 | "language_info": {
386 | "codemirror_mode": {
387 | "name": "ipython",
388 | "version": 3
389 | },
390 | "file_extension": ".py",
391 | "mimetype": "text/x-python",
392 | "name": "python",
393 | "nbconvert_exporter": "python",
394 | "pygments_lexer": "ipython3",
395 | "version": "3.5.2"
396 | }
397 | },
398 | "nbformat": 4,
399 | "nbformat_minor": 2
400 | }
401 |
--------------------------------------------------------------------------------
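
The last cell of the notebook trains the one-weight neuron through `tf.train.GradientDescentOptimizer`. A minimal hand-rolled sketch of the same update (plain Python, no TensorFlow; derived only from the cost and gradient formulas in the markdown cells above) reproduces the printed trajectory, since each step multiplies w by (1 - 2*alpha):

x, y_ = 1.0, 0.0        # constant input and target value, as in the notebook
w, alpha = 0.8, 0.025   # initial weight and learning rate, as in the notebook

for epoch in range(100):
    if epoch % 10 == 0:
        print("epoch {}, output: {}".format(epoch, w * x))
    grad = 2.0 * (w * x - y_) * x   # d(cost)/dw for cost = (w*x - y_)**2
    w -= alpha * grad               # gradient descent update
# epoch 0 prints 0.8 and epoch 10 prints about 0.47899, matching the TensorFlow output above.
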
/0.2.Basic/Basic_usageOfConstant.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | # Create a Constant op that produces a 1x2 matrix. The op is
4 | # added as a node to the default graph.
5 | #
6 | # The value returned by the constructor represents the output
7 | # of the Constant op.
8 | matrix1 = tf.constant([[3., 3.]])
9 |
10 | # Create another Constant that produces a 2x1 matrix.
11 | matrix2 = tf.constant([[2.],[2.]])
12 |
13 | # Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.
14 | # The returned value, 'product', represents the result of the matrix
15 | # multiplication.
16 | product = tf.matmul(matrix1, matrix2)
17 |
18 | # Launch the default graph.
19 | sess = tf.Session()
20 |
21 | with tf.Session() as sess:
22 | result = sess.run([product])
23 | print(result)
--------------------------------------------------------------------------------
/0.2.Basic/Basic_usageOfPlaceholder.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 |
4 | input1 = tf.placeholder(tf.float32)
5 | input2 = tf.placeholder(tf.float32)
6 | output = tf.multiply(input1, input2)
7 |
8 | with tf.Session() as sess:
9 | print(sess.run([output], feed_dict={input1:[7.], input2:[2.]}))
10 |
11 | # output:
12 | # [array([ 14.], dtype=float32)]
--------------------------------------------------------------------------------
/0.2.Basic/Basic_usageOfVariables.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | # Create a Variable, that will be initialized to the scalar value 0.
4 | state = tf.Variable(0, name="counter")
5 |
6 | # Create an Op to add one to `state`.
7 |
8 | one = tf.constant(1)
9 | new_value = tf.add(state, one)
10 | update = tf.assign(state, new_value)
11 |
12 | # Variables must be initialized by running an `init` Op after having
13 | # launched the graph. We first have to add the `init` Op to the graph.
14 | init_op = tf.global_variables_initializer()
15 |
16 | # Launch the graph and run the ops.
17 | with tf.Session() as sess:
18 | # Run the 'init' op
19 | sess.run(init_op)
20 | # Print the initial value of 'state'
21 | print(sess.run(state))
22 | # Run the op that updates 'state' and print 'state'.
23 | for _ in range(3):
24 | sess.run(update)
25 | print(sess.run(state))
26 |
27 | # output:
28 |
29 | # 0
30 | # 1
31 | # 2
32 | # 3
--------------------------------------------------------------------------------
/0.2.Basic/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.2.Basic/__init__.py
--------------------------------------------------------------------------------
/0.2.Basic/basic.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on Nov 17, 2015
3 |
4 | @author: root
5 | '''
6 |
7 | import tensorflow as tf
8 |
9 | a = tf.placeholder(tf.int16)
10 | b = tf.placeholder(tf.int16)
11 |
12 | # Define some operations
13 | add = tf.add(a, b)
14 | mul = tf.multiply(a, b)
15 |
16 | with tf.Session() as sess:
17 | print ("Addition with variables: %i" % sess.run(add, feed_dict={a:2, b:3}))
18 | print ("Multiplication with variables: %d" % sess.run(mul, feed_dict={a:2, b:3}))
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/0.2.Basic/hellowTensorFlow.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on Nov 17, 2015
3 |
4 | @author: root
5 | '''
6 | import tensorflow as tf
7 | hello = tf.constant('Hello, TensorFlow!')
8 | sess = tf.Session()
9 | print (sess.run(hello))
10 |
11 | a = tf.constant(10)
12 | b = tf.constant(32)
13 | print (sess.run(a+b))
--------------------------------------------------------------------------------
/0.2.Basic/log_simple_stats/events.out.tfevents.1485839642.jemin-desktop:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.2.Basic/log_simple_stats/events.out.tfevents.1485839642.jemin-desktop
--------------------------------------------------------------------------------
/0.2.Basic/log_simple_stats/events.out.tfevents.1485839723.jemin-desktop:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.2.Basic/log_simple_stats/events.out.tfevents.1485839723.jemin-desktop
--------------------------------------------------------------------------------
/0.2.Basic/log_simple_stats/events.out.tfevents.1485839766.jemin-desktop:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.2.Basic/log_simple_stats/events.out.tfevents.1485839766.jemin-desktop
--------------------------------------------------------------------------------
/0.2.Basic/log_simple_stats/events.out.tfevents.1485839778.jemin-desktop:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.2.Basic/log_simple_stats/events.out.tfevents.1485839778.jemin-desktop
--------------------------------------------------------------------------------
/0.2.Basic/log_simple_stats/events.out.tfevents.1485839840.jemin-desktop:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.2.Basic/log_simple_stats/events.out.tfevents.1485839840.jemin-desktop
--------------------------------------------------------------------------------
/0.2.Basic/log_simple_stats/events.out.tfevents.1485840189.jemin-desktop:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/0.2.Basic/log_simple_stats/events.out.tfevents.1485840189.jemin-desktop
--------------------------------------------------------------------------------
/1.Linear Regression/LinearRegression.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | x_data = [1,2,3]
4 | y_data = [1,2,3]
5 |
6 | # Try to find values for W and b that compute y_data = W * x_data + b
7 | # (We know that W should be 1 and b 0, but TensorFlow will figure that out for us.)
8 |
9 | W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
10 | b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
11 |
12 | # with placeholder
13 | X = tf.placeholder(tf.float32)
14 | Y = tf.placeholder(tf.float32)
15 |
16 |
17 | # Our hypothesis
18 | hypothesis = W * X + b
19 |
20 | # Simplified cost function
21 | cost = tf.reduce_mean(tf.square(hypothesis-Y))
22 |
23 | # Minimize
24 | a = tf.Variable(0.1) # Learning rate, alpha
25 | optimizer = tf.train.GradientDescentOptimizer(a)
26 | train = optimizer.minimize(cost)
27 |
28 | # Before starting, initialize the variables.
29 | # We are going to run this first.
30 | init = tf.global_variables_initializer()
31 |
32 | # Launch the graph
33 | sess = tf.Session()
34 | sess.run(init)
35 |
36 | # Fit the line.
37 | for step in xrange(2001):
38 | sess.run(train, feed_dict={X:x_data, Y:y_data})
39 | if step % 100 == 0 :
40 | print step, sess.run(cost,feed_dict={X:x_data, Y:y_data}), sess.run(W), sess.run(b)
41 |
42 | # Learns best fit is W: [1], b[0]
43 | print sess.run(hypothesis, feed_dict={X:5})
44 | print sess.run(hypothesis, feed_dict={X:2.5})
--------------------------------------------------------------------------------
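
The comments above say the learned fit should approach W: [1], b: [0]. For the three points (1,1), (2,2), (3,3) this can be confirmed in closed form; the short NumPy sketch below (an independent check, not part of the original script) computes the least-squares slope and intercept and the two predictions made at the end of the script.

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 2.0, 3.0])

W, b = np.polyfit(x, y, 1)      # least-squares line y = W*x + b
print(W, b)                     # approx. 1.0 and 0.0
print(W * 5 + b, W * 2.5 + b)   # approx. 5.0 and 2.5, matching the feed_dict predictions
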
/1.Linear Regression/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/1.Linear Regression/__init__.py
--------------------------------------------------------------------------------
/1.Linear Regression/multiFeaturesTrain.txt:
--------------------------------------------------------------------------------
1 | #x0 x1 x2 y
2 | 1 1 0 1
3 | 1 0 2 2
4 | 1 3 0 3
5 | 1 0 4 4
6 | 1 5 0 5
--------------------------------------------------------------------------------
/1.Linear Regression/multiVariableLinearRegression.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 |
4 | xy = np.loadtxt('multiFeaturesTrain.txt', unpack=True, dtype='float32')
5 | x_data = xy[0:-1]
6 | y_data = xy[-1]
7 |
8 | W = tf.Variable(tf.random_uniform([1, len(x_data)], -1, 1))
9 | b = tf.Variable(tf.random_uniform([1], -1, 1))
10 |
11 | hypothesis = tf.matmul(W, x_data)
12 |
13 | cost = tf.reduce_mean(tf.square(hypothesis - y_data))
14 |
15 | a = tf.Variable(0.001) # learning rate, alpha
16 | optimizer = tf.train.GradientDescentOptimizer(a)
17 | train = optimizer.minimize(cost) # goal is minimize cost
18 |
19 | init = tf.global_variables_initializer()
20 |
21 | sess = tf.Session()
22 | sess.run(init)
23 |
24 | # Fit the line.
25 | for step in xrange(2001):
26 | sess.run(train)
27 | if step % 400 == 0:
28 | print step, "cost=", "{:.9f}".format(sess.run(cost)), sess.run(W), sess.run(b)
29 |
30 | # Learns best fit is W: [1], b[0]
31 |
32 | #print sess.run(hypothesis, feed_dict={X: 5})
33 | #print sess.run(hypothesis, feed_dict={X: 2.5})
--------------------------------------------------------------------------------
/2.Logistic Classification/LogisticRegressionBasic.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 |
4 | xy = np.loadtxt('logisticTrain.txt', unpack=True, dtype='float32')
5 |
6 | x_data = xy[0:-1]
7 | y_data = xy[-1]
8 |
9 | X = tf.placeholder(tf.float32)
10 | Y = tf.placeholder(tf.float32)
11 |
12 | W = tf.Variable(tf.random_uniform([1,len(x_data)],-1.0, 1.0))
13 |
14 | # Our hypothesis
15 | h = tf.matmul(W, X)
16 | hypothesis = tf.div(1., 1+tf.exp(-h))
17 |
18 | # Cost function
19 | cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))
20 |
21 | # Minimize
22 | a = tf.Variable(0.1) # Learning rate, alpha
23 | optimizer = tf.train.GradientDescentOptimizer(a)
24 | train = optimizer.minimize(cost)
25 |
26 | # Before starting, initialize the variables. We will `run` this first.
27 | init = tf.global_variables_initializer()
28 |
29 | # Launch the graph.
30 | with tf.Session() as sess:
31 | sess.run(init)
32 |
33 | # Fit the line.
34 | for step in xrange(2000):
35 | sess.run(train, feed_dict={X:x_data, Y:y_data})
36 | if step % 200 == 0:
37 | print step, sess.run(cost, feed_dict={X:x_data, Y:y_data}), sess.run(W)
38 |
39 | # Test model: re-substitution error
40 | correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y)
41 | # Calculate accuracy
42 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
43 | print sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict={X:x_data, Y:y_data})
44 | print "Accuracy:", accuracy.eval({X:x_data, Y:y_data})
45 |
46 | print '----------------------------------------'
47 | # study hour attendance
48 | # unseen data, but we don't know exact answer
49 | print sess.run(hypothesis, feed_dict={X:[[1], [2], [2]]}) > 0.5
50 | print sess.run(hypothesis, feed_dict={X:[[1], [5], [5]]}) > 0.5
51 |
52 | print sess.run(hypothesis, feed_dict={X:[[1, 1], [4, 3], [3, 5]]}) > 0.5
--------------------------------------------------------------------------------
/2.Logistic Classification/SoftmaxClassification.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 |
4 | xy = np.loadtxt('softmaxTrain.txt', unpack=True, dtype='float')
5 |
6 | x_data = np.transpose(xy[0:3])
7 | y_data = np.transpose(xy[3:])
8 | # tensorflow graph input
9 | X = tf.placeholder('float', [None, 3]) # x1, x2, and 1 (for bias)
10 | Y = tf.placeholder('float', [None, 3]) # A, B, C = > three classes
11 | # set model weights
12 | W = tf.Variable(tf.random_uniform([3,3],-1.0, 1.0))
13 |
14 | # Our hypothesis
15 | hypothesis = tf.nn.softmax(tf.matmul(X, W)) # softmax
16 |
17 | # Cost function: cross entropy
18 | cost = tf.reduce_mean(-tf.reduce_sum(Y*tf.log(hypothesis), axis=1))
19 |
20 | # Minimize
21 | a = tf.Variable(0.2) # Learning rate, alpha
22 | optimizer = tf.train.GradientDescentOptimizer(a)
23 | train = optimizer.minimize(cost)
24 |
25 | # Before starting, initialize the variables. We will `run` this first.
26 | init = tf.global_variables_initializer()
27 |
28 | # Launch the graph,
29 | with tf.Session() as sess:
30 | sess.run(init)
31 |
32 | # Fit the line.
33 | for step in xrange(10000):
34 | sess.run(train, feed_dict={X:x_data, Y:y_data})
35 | if step % 200 == 0:
36 | print step, sess.run(cost, feed_dict={X:x_data, Y:y_data}), sess.run(W)
37 |
38 | # Test model
39 | correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y)
40 | # Calculate accuracy (re-substitution error)
41 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
42 | print sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict={X:x_data, Y:y_data})
43 | print "Accuracy:", accuracy.eval({X:x_data, Y:y_data})
44 |
45 | # Test & one-hot encoding
46 | a = sess.run(hypothesis, feed_dict={X:[[1, 11, 7]]})
47 | print a, sess.run(tf.arg_max(a,1))
48 |
49 | b = sess.run(hypothesis, feed_dict={X: [[1, 3, 4]]})
50 | print b, sess.run(tf.arg_max(b, 1))
51 |
52 | c = sess.run(hypothesis, feed_dict={X: [[1, 1, 0]]})
53 | print c, sess.run(tf.arg_max(c, 1))
54 |
55 | all = sess.run(hypothesis, feed_dict={X:[[1, 11, 7], [1, 3, 4], [1, 1, 0]]})
56 | print all, sess.run(tf.arg_max(all, 1))
57 |
--------------------------------------------------------------------------------
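
The hypothesis and cost used above are a softmax over X·W and the averaged cross-entropy. A NumPy-only sketch (with deliberately untrained, all-zero weights; illustrative values, not the learned W) shows what those two expressions compute for the first row of softmaxTrain.txt:

import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))   # subtract the row max for numerical stability
    return e / e.sum(axis=1, keepdims=True)

X = np.array([[1.0, 2.0, 1.0]])   # first data row: x0 x1 x2
Y = np.array([[0.0, 0.0, 1.0]])   # its one-hot label [A, B, C]
W = np.zeros((3, 3))              # hypothetical untrained weights

hypothesis = softmax(X.dot(W))    # uniform [1/3, 1/3, 1/3] when W is all zeros
cost = -np.mean(np.sum(Y * np.log(hypothesis), axis=1))
print(hypothesis, cost)           # cost is log(3), about 1.0986, before any training
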
/2.Logistic Classification/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/2.Logistic Classification/__init__.py
--------------------------------------------------------------------------------
/2.Logistic Classification/logisticTrain.txt:
--------------------------------------------------------------------------------
1 | #x0 x1 x2 y
2 | 1 2 1 0
3 | 1 3 2 0
4 | 1 3 4 0
5 | 1 5 5 1
6 | 1 7 5 1
7 | 1 2 5 1
8 |
9 |
--------------------------------------------------------------------------------
/2.Logistic Classification/softmaxTrain.txt:
--------------------------------------------------------------------------------
1 | #x0 x1 x2 y [A,B,C]
2 | 1 2 1 0 0 1
3 | 1 3 2 0 0 1
4 | 1 3 4 0 0 1
5 | 1 5 5 0 1 0
6 | 1 7 5 0 1 0
7 | 1 2 5 0 1 0
8 | 1 6 6 1 0 0
9 | 1 7 7 1 0 0
10 |
--------------------------------------------------------------------------------
/3.XOR/XORtrain.txt:
--------------------------------------------------------------------------------
1 | 0 0 0
2 | 0 1 1
3 | 1 0 1
4 | 1 1 0
5 |
6 |
--------------------------------------------------------------------------------
/3.XOR/XORwithLogisticRegression.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 |
4 | xy = np.loadtxt('XORtrain.txt', unpack=True)
5 |
6 | x_data = xy[0:-1]
7 | y_data = xy[-1]
8 |
9 | X = tf.placeholder(tf.float32)
10 | Y = tf.placeholder(tf.float32)
11 |
12 | W = tf.Variable(tf.random_uniform([1,len(x_data)],-1.0, 1.0))
13 |
14 | # Our hypothesis
15 | h = tf.matmul(W, X)
16 | hypothesis = tf.div(1., 1+tf.exp(-h))
17 |
18 | # Cost function
19 | cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))
20 |
21 | # Minimize
22 | a = tf.Variable(0.01) # Learning rate, alpha
23 | optimizer = tf.train.GradientDescentOptimizer(a)
24 | train = optimizer.minimize(cost)
25 |
26 | # Before starting, initialize the variables. We will `run` this first.
27 | init = tf.global_variables_initializer()
28 |
29 | # Launch the graph,
30 | with tf.Session() as sess:
31 | sess.run(init)
32 |
33 | # Fit the line.
34 | for step in xrange(5000):
35 | sess.run(train, feed_dict={X:x_data, Y:y_data})
36 | if step % 200 == 0:
37 | print step, sess.run(cost, feed_dict={X:x_data, Y:y_data}), sess.run(W)
38 |
39 | # Test model
40 | correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y)
41 | # Calculate accuracy
42 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
43 | print sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict={X:x_data, Y:y_data})
44 | print "Accuracy:", accuracy.eval({X:x_data, Y:y_data})
--------------------------------------------------------------------------------
/3.XOR/XORwithNN.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 |
4 | xy = np.loadtxt('XORtrain.txt', unpack=True)
5 | #xy = np.loadtxt('XORtrain.txt')
6 |
7 | #x_data = xy[0:-1]
8 | x_data = np.transpose(xy[0:-1])
9 | #y_data = xy[-1]
10 | y_data = np.reshape(xy[-1], (4, 1))
11 |
12 | X = tf.placeholder(tf.float32)
13 | Y = tf.placeholder(tf.float32)
14 |
15 | W1 = tf.Variable(tf.random_uniform([2, 2], -1.0, 1.0))
16 | W2 = tf.Variable(tf.random_uniform([2, 1], -1.0, 1.0))
17 |
18 | b1 = tf.Variable(tf.zeros([2]), name="Bias1")
19 | b2 = tf.Variable(tf.zeros([1]), name="Bias2")
20 |
21 |
22 | # Our hypothesis
23 | L2 = tf.sigmoid(tf.matmul(X, W1) + b1)
24 | hypothesis = tf.sigmoid(tf.matmul(L2, W2) + b2)
25 |
26 | # Cost function
27 | cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))
28 |
29 | # Minimize
30 | a = tf.Variable(0.1) # Learning rate, alpha
31 | optimizer = tf.train.GradientDescentOptimizer(a)
32 | train = optimizer.minimize(cost)
33 |
34 | # Before starting, initialize the variables. We will `run` this first.
35 | init = tf.global_variables_initializer()
36 |
37 |
38 | # Launch the graph,
39 | with tf.Session() as sess:
40 |
41 | sess.run(init)
42 | # Fit the line.
43 | for step in xrange(10000):
44 | sess.run(train, feed_dict={X:x_data, Y:y_data})
45 | if step % 200 == 0:
46 | print step, sess.run(cost, feed_dict={X:x_data, Y:y_data}), sess.run(W1), sess.run(W2)
47 |
48 | # Test model
49 | correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y)
50 | # Calculate accuracy
51 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
52 | print sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict={X:x_data, Y:y_data})
53 | print "Accuracy:", accuracy.eval({X:x_data, Y:y_data})
--------------------------------------------------------------------------------
/3.XOR/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/3.XOR/__init__.py
--------------------------------------------------------------------------------
/4.MNIST/DNN_DropoutForMNIST.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | from tensorflow.examples.tutorials.mnist import input_data
4 |
5 |
6 | def xavier_init(n_inputs, n_outputs, uniform = True):
7 | if uniform:
8 | init_range = tf.sqrt(6.0/ (n_inputs + n_outputs))
9 | return tf.random_uniform_initializer(-init_range, init_range)
10 |
11 | else:
12 | stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
13 | return tf.truncated_normal_initializer(stddev=stddev)
14 |
15 | learning_rate = 0.001
16 | training_epochs = 25
17 | batch_size = 100
18 | display_step = 1
19 |
20 | mnist = input_data.read_data_sets("./MNIST_DATA", one_hot=True)
21 | # tensorflow graph input
22 | X = tf.placeholder('float', [None, 784]) # mnist data image of shape 28 * 28 = 784
23 | Y = tf.placeholder('float', [None, 10]) # 0-9 digits recognition = > 10 classes
24 |
25 | # set dropout rate
26 | dropout_rate = tf.placeholder("float")
27 |
28 | # set model weights
29 | W1 = tf.get_variable("W1", shape=[784, 256], initializer=xavier_init(784, 256))
30 | W2 = tf.get_variable("W2", shape=[256, 256], initializer=xavier_init(256, 256))
31 | W3 = tf.get_variable("W3", shape=[256, 256], initializer=xavier_init(256, 256))
32 | W4 = tf.get_variable("W4", shape=[256, 256], initializer=xavier_init(256, 256))
33 | W5 = tf.get_variable("W5", shape=[256, 10], initializer=xavier_init(256, 10))
34 |
35 | B1 = tf.Variable(tf.random_normal([256]))
36 | B2 = tf.Variable(tf.random_normal([256]))
37 | B3 = tf.Variable(tf.random_normal([256]))
38 | B4 = tf.Variable(tf.random_normal([256]))
39 | B5 = tf.Variable(tf.random_normal([10]))
40 |
41 | # Construct model
42 | _L1 = tf.nn.relu(tf.add(tf.matmul(X,W1),B1))
43 | L1 = tf.nn.dropout(_L1, dropout_rate)
44 | _L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2),B2)) # Hidden layer with ReLU activation
45 | L2 = tf.nn.dropout(_L2, dropout_rate)
46 | _L3 = tf.nn.relu(tf.add(tf.matmul(L2, W3),B3)) # Hidden layer with ReLU activation
47 | L3 = tf.nn.dropout(_L3, dropout_rate)
48 | _L4 = tf.nn.relu(tf.add(tf.matmul(L3, W4),B4)) # Hidden layer with ReLU activation
49 | L4 = tf.nn.dropout(_L4, dropout_rate)
50 |
51 | hypothesis = tf.add(tf.matmul(L4, W5), B5) # No need to use softmax here
52 |
53 | # Define loss and optimizer
54 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y)) # Softmax loss
55 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
56 |
57 | # Initializing the variables
58 | init = tf.global_variables_initializer()
59 |
60 | # Launch the graph,
61 | with tf.Session() as sess:
62 | sess.run(init)
63 |
64 | # Training cycle
65 | for epoch in range(training_epochs):
66 | avg_cost = 0.
67 | total_batch = int(mnist.train.num_examples/batch_size)
68 |
69 | # Fit the line.
70 | for step in range(total_batch):
71 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
72 |
73 | # Fit training using batch data
74 | # set up 0.7 for training time
75 | sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7})
76 |
77 | # Compute average loss
78 | avg_cost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7})/total_batch
79 | # Display logs per epoch step
80 | if epoch % display_step == 0:
81 | print("Epoch:", '%04d' %(epoch+1), "cost=", "{:.9f}".format(avg_cost))
82 |
83 | print("Optimization Finished!")
84 |
85 | # Test model
86 | correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
87 | # Calculate accuracy
88 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
89 | print ("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels, dropout_rate: 1}))
--------------------------------------------------------------------------------
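
xavier_init above returns a uniform initializer with range sqrt(6 / (n_inputs + n_outputs)). A short NumPy sketch (an illustration of the same formula, not TensorFlow's initializer itself) shows the concrete range used for the first 784x256 layer:

import numpy as np

n_inputs, n_outputs = 784, 256                       # shape of W1 above
init_range = np.sqrt(6.0 / (n_inputs + n_outputs))   # Xavier/Glorot uniform range
W1_example = np.random.uniform(-init_range, init_range, size=(n_inputs, n_outputs))

print(init_range)        # approx. 0.076
print(W1_example.shape)  # (784, 256)
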
/4.MNIST/DNNforMNIST.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | import input_data
4 |
5 |
6 | def xaver_init(n_inputs, n_outputs, uniform = True):
7 | if uniform:
8 | init_range = tf.sqrt(6.0/ (n_inputs + n_outputs))
9 | return tf.random_uniform_initializer(-init_range, init_range)
10 |
11 | else:
12 | stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
13 | return tf.truncated_normal_initializer(stddev=stddev)
14 |
15 | learning_rate = 0.01
16 | training_epochs = 15
17 | batch_size = 100
18 | display_step = 1
19 |
20 | mnist = input_data.read_data_sets("./MNIST_DATA", one_hot=True)
21 | # tensorflow graph input
22 | X = tf.placeholder('float', [None, 784]) # mnist data image of shape 28 * 28 = 784
23 | Y = tf.placeholder('float', [None, 10]) # 0-9 digit recognition => 10 classes
24 |
25 | # set model weights
26 | W1 = tf.get_variable("W1", shape=[784, 256], initializer=xavier_init(784, 256))
27 | W2 = tf.get_variable("W2", shape=[256, 256], initializer=xavier_init(256, 256))
28 | W3 = tf.get_variable("W3", shape=[256, 10], initializer=xavier_init(256, 10))
29 |
30 | #W1 = tf.Variable(tf.random_normal([784, 256]))
31 | #W2 = tf.Variable(tf.random_normal([256, 256]))
32 | #W3 = tf.Variable(tf.random_normal([256, 10]))
33 |
34 | B1 = tf.Variable(tf.random_normal([256]))
35 | B2 = tf.Variable(tf.random_normal([256]))
36 | B3 = tf.Variable(tf.random_normal([10]))
37 |
38 | # Construct model
39 | L1 = tf.nn.relu(tf.add(tf.matmul(X,W1),B1))
40 | L2 = tf.nn.relu(tf.add(tf.matmul(L1,W2),B2)) # Hidden layer with RELU activation
41 | hypothesis = tf.add(tf.matmul(L2, W3), B3) # No need to use softmax here
42 |
43 | # Define loss and optimizer
44 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y)) # Softmax loss
45 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
46 |
47 | # Initializing the variables
48 | init = tf.global_variables_initializer()
49 |
50 | # Launch the graph,
51 | with tf.Session() as sess:
52 | sess.run(init)
53 |
54 | # Training cycle
55 | for epoch in range(training_epochs):
56 | avg_cost = 0.
57 | total_batch = int(mnist.train.num_examples/batch_size)
58 |
59 |         # Loop over minibatches
60 |         for step in range(total_batch):
61 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
62 |
63 | # Fit training using batch data
64 |
65 | sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})
66 |
67 | # Compute average loss
68 | avg_cost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys})/total_batch
69 | # Display logs per epoch step
70 | if epoch % display_step == 0:
71 |             print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
72 |
73 |     print("Optimization Finished!")
74 |
75 | # Test model
76 | correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
77 | # Calculate accuracy
78 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
79 | print ("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels}))
--------------------------------------------------------------------------------
/4.MNIST/MNIST_DATA/t10k-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/4.MNIST/MNIST_DATA/t10k-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/4.MNIST/MNIST_DATA/t10k-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/4.MNIST/MNIST_DATA/t10k-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/4.MNIST/MNIST_DATA/train-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/4.MNIST/MNIST_DATA/train-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/4.MNIST/MNIST_DATA/train-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/4.MNIST/MNIST_DATA/train-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/4.MNIST/SoftmaxClassificationMNIST.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | import input_data
4 |
5 | learning_rate = 0.01
6 | training_epochs = 25
7 | batch_size = 100
8 | display_step = 1
9 |
10 | mnist = input_data.read_data_sets("./MNIST_DATA", one_hot=True)
11 |
12 | # tensorflow graph input
13 | X = tf.placeholder('float', [None, 784]) # mnist data image of shape 28 * 28 = 784
14 | Y = tf.placeholder('float', [None, 10]) # 0-9 digit recognition => 10 classes
15 |
16 | # set model weights
17 | W = tf.Variable(tf.zeros([784, 10]))
18 | b = tf.Variable(tf.zeros([10]))
19 |
20 | # Our hypothesis
21 | activation = tf.add(tf.matmul(X, W), b) # linear logits; softmax is applied inside the loss below
22 |
23 | # Cost function: cross entropy
24 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=activation, labels=Y))
25 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam optimizer
26 |
27 | # Before starting, initialize the variables. We will `run` this first.
28 | init = tf.global_variables_initializer()
29 |
30 | # Launch the graph,
31 | with tf.Session() as sess:
32 | sess.run(init)
33 |
34 | # Training cycle
35 | for epoch in range(training_epochs):
36 | avg_cost = 0.
37 | total_batch = int(mnist.train.num_examples/batch_size)
38 |
39 |         # Loop over minibatches
40 | for step in range(total_batch):
41 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
42 |
43 | # Fit training using batch data
44 |
45 | sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})
46 |
47 | # Compute average loss
48 | avg_cost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys})/total_batch
49 | # Display logs per epoch step
50 | if epoch % display_step == 0:
51 | print ("Epoch:", '%04d' %(epoch+1), "cost=", "{:.9f}".format(avg_cost))
52 |
53 | print ("Optimization Finished!")
54 |
55 | # Test model
56 | correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(Y, 1))
57 | # Calculate accuracy
58 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
59 | print ("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels}))
--------------------------------------------------------------------------------
/4.MNIST/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/4.MNIST/__init__.py
--------------------------------------------------------------------------------
/4.MNIST/googleLayers.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Google Inc. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | """Utility functions for adding layers to a Model.
12 | NB: This is used by PrettyTensor, but it will be deprecated. Please do not use!
13 | """
14 | import math
15 |
16 | import tensorflow as tf
17 |
18 | # Implementation note: this takes a tuple for an activation instead of
19 | # encouraging lambdas so that we can inspect the actual function and add
20 | # appropriate summaries.
21 |
22 | def add_l2loss(books, params, l2loss, name='weight_decay'):
23 | if l2loss:
24 | books.add_loss(
25 | tf.multiply(tf.nn.l2_loss(params), l2loss, name=name),
26 | regularization=True)
27 |
28 |
29 | def xavier_init(n_inputs, n_outputs, uniform=True):
30 | """Set the parameter initialization using the method described.
31 | This method is designed to keep the scale of the gradients roughly the same
32 | in all layers.
33 | Xavier Glorot and Yoshua Bengio (2010):
34 | Understanding the difficulty of training deep feedforward neural
35 | networks. International conference on artificial intelligence and
36 | statistics.
37 | Args:
38 | n_inputs: The number of input nodes into each output.
39 | n_outputs: The number of output nodes for each input.
40 | uniform: If true use a uniform distribution, otherwise use a normal.
41 | Returns:
42 | An initializer.
43 | """
44 | if uniform:
45 | # 6 was used in the paper.
46 | init_range = math.sqrt(6.0 / (n_inputs + n_outputs))
47 | return tf.random_uniform_initializer(-init_range, init_range)
48 | else:
49 | # 3 gives us approximately the same limits as above since this repicks
50 | # values greater than 2 standard deviations from the mean.
51 | stddev = math.sqrt(3.0 / (n_inputs + n_outputs))
52 | return tf.truncated_normal_initializer(stddev=stddev)
53 |
54 |
55 | def spatial_slice_zeros(x):
56 | """Experimental summary that shows how many planes are unused for a batch."""
57 | return tf.cast(tf.reduce_all(tf.less_equal(x, 0.0), [0, 1, 2]),
58 | tf.float32)
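59 | 
60 | # Example for xavier_init: a 784 -> 256 layer gives a uniform limit of
61 | # sqrt(6 / (784 + 256)) ~= 0.076, so initial weights are drawn from roughly [-0.076, 0.076].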
--------------------------------------------------------------------------------
/4.MNIST/input_data.py:
--------------------------------------------------------------------------------
1 | """Functions for downloading and reading MNIST data."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import gzip
7 | import os
8 |
9 | import numpy
10 | from six.moves import urllib
11 | from six.moves import xrange # pylint: disable=redefined-builtin
12 |
13 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
14 |
15 |
16 | def maybe_download(filename, work_directory):
17 | """Download the data from Yann's website, unless it's already here."""
18 | if not os.path.exists(work_directory):
19 | os.mkdir(work_directory)
20 | filepath = os.path.join(work_directory, filename)
21 | if not os.path.exists(filepath):
22 | filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
23 | statinfo = os.stat(filepath)
24 |     print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
25 | return filepath
26 |
27 |
28 | def _read32(bytestream):
29 | dt = numpy.dtype(numpy.uint32).newbyteorder('>')
30 | return numpy.frombuffer(bytestream.read(4), dtype=dt)
31 |
32 |
33 | def extract_images(filename):
34 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
35 | print('Extracting', filename)
36 | with gzip.open(filename) as bytestream:
37 | magic = _read32(bytestream)
38 | if magic != 2051:
39 | raise ValueError(
40 | 'Invalid magic number %d in MNIST image file: %s' %
41 | (magic, filename))
42 | num_images = _read32(bytestream)
43 | rows = _read32(bytestream)
44 | cols = _read32(bytestream)
45 | buf = bytestream.read(rows * cols * num_images)
46 | data = numpy.frombuffer(buf, dtype=numpy.uint8)
47 | data = data.reshape(num_images, rows, cols, 1)
48 | return data
49 |
50 |
51 | def dense_to_one_hot(labels_dense, num_classes=10):
52 | """Convert class labels from scalars to one-hot vectors."""
53 | num_labels = labels_dense.shape[0]
54 | index_offset = numpy.arange(num_labels) * num_classes
55 | labels_one_hot = numpy.zeros((num_labels, num_classes))
56 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
57 | return labels_one_hot
58 |
59 |
60 | def extract_labels(filename, one_hot=False):
61 | """Extract the labels into a 1D uint8 numpy array [index]."""
62 | print('Extracting', filename)
63 | with gzip.open(filename) as bytestream:
64 | magic = _read32(bytestream)
65 | if magic != 2049:
66 | raise ValueError(
67 | 'Invalid magic number %d in MNIST label file: %s' %
68 | (magic, filename))
69 | num_items = _read32(bytestream)
70 | buf = bytestream.read(num_items)
71 | labels = numpy.frombuffer(buf, dtype=numpy.uint8)
72 | if one_hot:
73 | return dense_to_one_hot(labels)
74 | return labels
75 |
76 |
77 | class DataSet(object):
78 |
79 | def __init__(self, images, labels, fake_data=False):
80 | if fake_data:
81 | self._num_examples = 10000
82 | else:
83 | assert images.shape[0] == labels.shape[0], (
84 | "images.shape: %s labels.shape: %s" % (images.shape,
85 | labels.shape))
86 | self._num_examples = images.shape[0]
87 |
88 | # Convert shape from [num examples, rows, columns, depth]
89 | # to [num examples, rows*columns] (assuming depth == 1)
90 | assert images.shape[3] == 1
91 | images = images.reshape(images.shape[0],
92 | images.shape[1] * images.shape[2])
93 | # Convert from [0, 255] -> [0.0, 1.0].
94 | images = images.astype(numpy.float32)
95 | images = numpy.multiply(images, 1.0 / 255.0)
96 | self._images = images
97 | self._labels = labels
98 | self._epochs_completed = 0
99 | self._index_in_epoch = 0
100 |
101 | @property
102 | def images(self):
103 | return self._images
104 |
105 | @property
106 | def labels(self):
107 | return self._labels
108 |
109 | @property
110 | def num_examples(self):
111 | return self._num_examples
112 |
113 | @property
114 | def epochs_completed(self):
115 | return self._epochs_completed
116 |
117 | def next_batch(self, batch_size, fake_data=False):
118 | """Return the next `batch_size` examples from this data set."""
119 | if fake_data:
120 | fake_image = [1.0 for _ in xrange(784)]
121 | fake_label = 0
122 | return [fake_image for _ in xrange(batch_size)], [
123 | fake_label for _ in xrange(batch_size)]
124 | start = self._index_in_epoch
125 | self._index_in_epoch += batch_size
126 | if self._index_in_epoch > self._num_examples:
127 | # Finished epoch
128 | self._epochs_completed += 1
129 | # Shuffle the data
130 | perm = numpy.arange(self._num_examples)
131 | numpy.random.shuffle(perm)
132 | self._images = self._images[perm]
133 | self._labels = self._labels[perm]
134 | # Start next epoch
135 | start = 0
136 | self._index_in_epoch = batch_size
137 | assert batch_size <= self._num_examples
138 | end = self._index_in_epoch
139 | return self._images[start:end], self._labels[start:end]
140 |
141 |
142 | def read_data_sets(train_dir, fake_data=False, one_hot=False):
143 | class DataSets(object):
144 | pass
145 | data_sets = DataSets()
146 |
147 | if fake_data:
148 | data_sets.train = DataSet([], [], fake_data=True)
149 | data_sets.validation = DataSet([], [], fake_data=True)
150 | data_sets.test = DataSet([], [], fake_data=True)
151 | return data_sets
152 |
153 | TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
154 | TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
155 | TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
156 | TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
157 | VALIDATION_SIZE = 5000
158 |
159 | local_file = maybe_download(TRAIN_IMAGES, train_dir)
160 | train_images = extract_images(local_file)
161 |
162 | local_file = maybe_download(TRAIN_LABELS, train_dir)
163 | train_labels = extract_labels(local_file, one_hot=one_hot)
164 |
165 | local_file = maybe_download(TEST_IMAGES, train_dir)
166 | test_images = extract_images(local_file)
167 |
168 | local_file = maybe_download(TEST_LABELS, train_dir)
169 | test_labels = extract_labels(local_file, one_hot=one_hot)
170 |
171 | validation_images = train_images[:VALIDATION_SIZE]
172 | validation_labels = train_labels[:VALIDATION_SIZE]
173 | train_images = train_images[VALIDATION_SIZE:]
174 | train_labels = train_labels[VALIDATION_SIZE:]
175 |
176 | data_sets.train = DataSet(train_images, train_labels)
177 | data_sets.validation = DataSet(validation_images, validation_labels)
178 | data_sets.test = DataSet(test_images, test_labels)
179 |
180 | return data_sets
181 |
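182 | # Typical usage (as in the training scripts in this directory):
183 | #   mnist = input_data.read_data_sets("./MNIST_DATA", one_hot=True)
184 | #   batch_xs, batch_ys = mnist.train.next_batch(100)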
--------------------------------------------------------------------------------
/5.CNN/CNNforMNIST.py:
--------------------------------------------------------------------------------
1 |
2 | import tensorflow as tf
3 | import numpy as np
4 | from tensorflow.examples.tutorials.mnist import input_data
5 | import time
6 |
7 | batch_size = 128
8 | test_size = 256
9 |
10 | def init_weights(shape):
11 | return tf.Variable(tf.random_normal(shape, stddev=0.01))
12 |
13 | # Filter weight vectors: w, w2, w3, w4, w_o
14 | def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
15 | l1a = tf.nn.relu(tf.nn.conv2d(X, w, # l1a shape=(?, 28, 28, 32)
16 | strides=[1, 1, 1, 1], padding='SAME'))
17 | l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1], # l1 shape=(?, 14, 14, 32)
18 | strides=[1, 2, 2, 1], padding='SAME')
19 | l1 = tf.nn.dropout(l1, p_keep_conv)
20 |
21 |
22 | l2a = tf.nn.relu(tf.nn.conv2d(l1, w2, # l2a shape=(?, 14, 14, 64)
23 | strides=[1, 1, 1, 1], padding='SAME'))
24 | l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1], # l2 shape=(?, 7, 7, 64)
25 | strides=[1, 2, 2, 1], padding='SAME')
26 | l2 = tf.nn.dropout(l2, p_keep_conv)
27 |
28 |
29 | l3a = tf.nn.relu(tf.nn.conv2d(l2, w3, # l3a shape=(?, 7, 7, 128)
30 | strides=[1, 1, 1, 1], padding='SAME'))
31 | l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1], # l3 shape=(?, 4, 4, 128)
32 | strides=[1, 2, 2, 1], padding='SAME')
33 | l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]]) # reshape to (?, 2048)
34 | l3 = tf.nn.dropout(l3, p_keep_conv)
35 |
36 |
37 | l4 = tf.nn.relu(tf.matmul(l3, w4))
38 | l4 = tf.nn.dropout(l4, p_keep_hidden)
39 |
40 | pyx = tf.matmul(l4, w_o)
41 | return pyx
42 |
43 | # Read data
44 | mnist = input_data.read_data_sets("MNIST_DATA/", one_hot=True)
45 | trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
46 |
47 | # trX.reshape(n_inputs, image height, image width, depth)
48 | # these reshaped arrays are the inputs to model()
49 | trX = trX.reshape(-1, 28, 28, 1) # 28x28x1 input img
50 | teX = teX.reshape(-1, 28, 28, 1) # 28x28x1 input img
51 |
52 | X = tf.placeholder("float", [None, 28, 28, 1])
53 | Y = tf.placeholder("float", [None, 10])
54 |
55 | w = init_weights([3, 3, 1, 32]) # 3x3x1 conv, 32 outputs
56 | w2 = init_weights([3, 3, 32, 64]) # 3x3x32 conv, 64 outputs
57 | w3 = init_weights([3, 3, 64, 128])    # 3x3x64 conv, 128 outputs
58 | w4 = init_weights([128 * 4 * 4, 625]) # FC: three 2x2 max-pools reduce 28->14->7->4, so 128 * 4 * 4 inputs, 625 outputs
59 | w_o = init_weights([625, 10]) # FC 625 inputs, 10 outputs (labels)
60 |
61 | p_keep_conv = tf.placeholder("float")
62 | p_keep_hidden = tf.placeholder("float")
63 | py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)
64 |
65 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
66 | train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
67 | predict_op = tf.argmax(py_x, 1)
68 | init = tf.global_variables_initializer()
69 |
70 | # Launch the graph in a session
71 | with tf.Session() as sess:
72 | # you need to initialize all variables
73 | start_time = time.time()
74 | sess.run(init)
75 |
76 | for i in range(100):
77 | training_batch = zip(range(0, len(trX), batch_size),
78 | range(batch_size, len(trX)+1, batch_size))
79 | for start, end in training_batch:
80 | sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
81 | p_keep_conv: 0.8, p_keep_hidden: 0.5})
82 |
83 | test_indices = np.arange(len(teX)) # Get A Test Batch
84 | np.random.shuffle(test_indices)
85 | test_indices = test_indices[0:test_size]
86 |
87 | print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
88 | sess.run(predict_op, feed_dict={X: teX[test_indices],
89 | Y: teY[test_indices],
90 | p_keep_conv: 1.0,
91 | p_keep_hidden: 1.0})))
92 |
93 | print("time elapsed: {:.2f}s".format(time.time() - start_time))
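94 | 
95 |     # Note: the per-epoch accuracy above is measured on a random sample of
96 |     # test_size (256) test images, so it fluctuates slightly between epochs.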
--------------------------------------------------------------------------------
/5.CNN/CNNforMNIST2.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.examples.tutorials.mnist import input_data
3 |
4 | tf.reset_default_graph() # remove the previous computation graph
5 |
6 |
7 | def CNN():
8 | # download the mnist data.
9 | mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
10 |
11 | # placeholder is used for feeding data.
12 | x = tf.placeholder("float", shape=[None, 784],
13 |                        name='x')  # None allows a variable batch size; 784 is the dimensionality of a flattened MNIST image.
14 | y_target = tf.placeholder("float", shape=[None, 10],
15 |                               name='y_target')  # the shape argument is optional, but specifying it helps with debugging.
16 |
17 | # reshape input data
18 | x_image = tf.reshape(x, [-1, 28, 28, 1], name="x_image")
19 |
20 | # Build a convolutional layer and maxpooling with random initialization
21 | W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1),
22 | name="W_conv1") # W is [row, col, channel, feature]
23 | b_conv1 = tf.Variable(tf.zeros([32]), name="b_conv1")
24 | h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1, name="h_conv1")
25 | h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name="h_pool1")
26 |
27 | # Repeat again with 64 number of filters
28 | W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1),
29 | name="W_conv2") # W is [row, col, channel, feature]
30 | b_conv2 = tf.Variable(tf.zeros([64]), name="b_conv2")
31 | h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2, name="h_conv2")
32 | h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name="h_pool2")
33 |
34 | # Build a fully connected layer
35 | h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64], name="h_pool2_flat")
36 | W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1), name='W_fc1')
37 | b_fc1 = tf.Variable(tf.zeros([1024]), name='b_fc1')
38 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1, name="h_fc1")
39 |
40 | # Dropout Layer
41 | keep_prob = tf.placeholder("float", name="keep_prob")
42 | h_fc1 = tf.nn.dropout(h_fc1, keep_prob, name="h_fc1_drop")
43 |
44 | # Build a fully connected layer with softmax
45 | W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1), name='W_fc2')
46 | b_fc2 = tf.Variable(tf.zeros([10]), name='b_fc2')
47 | y = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2, name="y")
48 |
49 |     # define the loss function: plain cross-entropy (tf.nn.softmax_cross_entropy_with_logits is the numerically safer alternative)
50 | cross_entropy = -tf.reduce_sum(y_target * tf.log(y), name='cross_entropy')
51 |
52 | # define optimization algorithm
53 | # train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
54 | train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
55 |
56 | correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_target, 1))
57 |     # correct_prediction is a boolean tensor obtained by comparing the model prediction with the target labels
58 |
59 |
60 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
61 |     # tf.cast(): converts True -> 1.0 / False -> 0.0
62 |     # tf.reduce_mean(): calculates the mean
63 |
64 | # Create Session
65 | sess = tf.Session(config=tf.ConfigProto(
66 |         gpu_options=tf.GPUOptions(allow_growth=True)))  # open a session, the environment in which the computation graph runs.
67 | sess.run(tf.global_variables_initializer()) # initialize the variables
68 |
69 | # create summary of parameters
70 | tf.summary.histogram('weights_1', W_conv1)
71 | tf.summary.histogram('weights_2', W_conv2)
72 | tf.summary.histogram('y', y)
73 | tf.summary.scalar('cross_entropy', cross_entropy)
74 | merged = tf.summary.merge_all()
75 | summary_writer = tf.summary.FileWriter("/tmp/cnn", sess.graph)
76 |
77 | # training the MLP
78 |     for i in range(5001):  # minibatch iterations
79 | batch = mnist.train.next_batch(100) # minibatch size
80 | sess.run(train_step, feed_dict={x: batch[0], y_target: batch[1],
81 |                                         keep_prob: 0.5})  # the None batch dimension is filled by the 100-sample minibatch
82 |
83 | if i % 500 == 0:
84 | train_accuracy = sess.run(accuracy, feed_dict={x: batch[0], y_target: batch[1], keep_prob: 1})
85 | print("step %d, training accuracy: %.3f" % (i, train_accuracy))
86 |
87 | # calculate the summary and write.
88 | summary = sess.run(merged, feed_dict={x: batch[0], y_target: batch[1], keep_prob: 1})
89 | summary_writer.add_summary(summary, i)
90 |
91 | # for given x, y_target data set
92 | print("test accuracy: %g" % sess.run(accuracy,
93 | feed_dict={x: mnist.test.images[0:150], y_target: mnist.test.labels[0:150],
94 | keep_prob: 1}))
95 | sess.close()
96 |
97 |
98 | CNN()
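99 | 
100 | # The summaries are written to /tmp/cnn (see the FileWriter above); view them with:
101 | #   tensorboard --logdir=/tmp/cnn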
--------------------------------------------------------------------------------
/5.CNN/Cnn_layer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/5.CNN/Cnn_layer.png
--------------------------------------------------------------------------------
/5.CNN/MNIST_DATA/t10k-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/5.CNN/MNIST_DATA/t10k-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/5.CNN/MNIST_DATA/t10k-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/5.CNN/MNIST_DATA/t10k-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/5.CNN/MNIST_DATA/train-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/5.CNN/MNIST_DATA/train-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/5.CNN/MNIST_DATA/train-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/5.CNN/MNIST_DATA/train-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/5.CNN/big_cnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/5.CNN/big_cnn.png
--------------------------------------------------------------------------------
/5.CNN/googleLayers.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Google Inc. All Rights Reserved.
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | # Unless required by applicable law or agreed to in writing, software
7 | # distributed under the License is distributed on an "AS IS" BASIS,
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 | # See the License for the specific language governing permissions and
10 | # limitations under the License.
11 | """Utility functions for adding layers to a Model.
12 | NB: This is used by PrettyTensor, but it will be deprecated. Please do not use!
13 | """
14 | import math
15 |
16 | import tensorflow as tf
17 |
18 | # Implementation note: this takes a tuple for an activation instead of
19 | # encouraging lambdas so that we can inspect the actual function and add
20 | # appropriate summaries.
21 |
22 | def add_l2loss(books, params, l2loss, name='weight_decay'):
23 | if l2loss:
24 | books.add_loss(
25 | tf.multiply(tf.nn.l2_loss(params), l2loss, name=name),
26 | regularization=True)
27 |
28 |
29 | def xavier_init(n_inputs, n_outputs, uniform=True):
30 | """Set the parameter initialization using the method described.
31 | This method is designed to keep the scale of the gradients roughly the same
32 | in all layers.
33 | Xavier Glorot and Yoshua Bengio (2010):
34 | Understanding the difficulty of training deep feedforward neural
35 | networks. International conference on artificial intelligence and
36 | statistics.
37 | Args:
38 | n_inputs: The number of input nodes into each output.
39 | n_outputs: The number of output nodes for each input.
40 | uniform: If true use a uniform distribution, otherwise use a normal.
41 | Returns:
42 | An initializer.
43 | """
44 | if uniform:
45 | # 6 was used in the paper.
46 | init_range = math.sqrt(6.0 / (n_inputs + n_outputs))
47 | return tf.random_uniform_initializer(-init_range, init_range)
48 | else:
49 | # 3 gives us approximately the same limits as above since this repicks
50 | # values greater than 2 standard deviations from the mean.
51 | stddev = math.sqrt(3.0 / (n_inputs + n_outputs))
52 | return tf.truncated_normal_initializer(stddev=stddev)
53 |
54 |
55 | def spatial_slice_zeros(x):
56 | """Experimental summary that shows how many planes are unused for a batch."""
57 | return tf.cast(tf.reduce_all(tf.less_equal(x, 0.0), [0, 1, 2]),
58 | tf.float32)
--------------------------------------------------------------------------------
/5.CNN/input_data.py:
--------------------------------------------------------------------------------
1 | """Functions for downloading and reading MNIST data."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import gzip
7 | import os
8 |
9 | import numpy
10 | from six.moves import urllib
11 | from six.moves import xrange # pylint: disable=redefined-builtin
12 |
13 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
14 |
15 |
16 | def maybe_download(filename, work_directory):
17 | """Download the data from Yann's website, unless it's already here."""
18 | if not os.path.exists(work_directory):
19 | os.mkdir(work_directory)
20 | filepath = os.path.join(work_directory, filename)
21 | if not os.path.exists(filepath):
22 | filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
23 | statinfo = os.stat(filepath)
24 |     print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
25 | return filepath
26 |
27 |
28 | def _read32(bytestream):
29 | dt = numpy.dtype(numpy.uint32).newbyteorder('>')
30 | return numpy.frombuffer(bytestream.read(4), dtype=dt)
31 |
32 |
33 | def extract_images(filename):
34 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
35 | print('Extracting', filename)
36 | with gzip.open(filename) as bytestream:
37 | magic = _read32(bytestream)
38 | if magic != 2051:
39 | raise ValueError(
40 | 'Invalid magic number %d in MNIST image file: %s' %
41 | (magic, filename))
42 | num_images = _read32(bytestream)
43 | rows = _read32(bytestream)
44 | cols = _read32(bytestream)
45 | buf = bytestream.read(rows * cols * num_images)
46 | data = numpy.frombuffer(buf, dtype=numpy.uint8)
47 | data = data.reshape(num_images, rows, cols, 1)
48 | return data
49 |
50 |
51 | def dense_to_one_hot(labels_dense, num_classes=10):
52 | """Convert class labels from scalars to one-hot vectors."""
53 | num_labels = labels_dense.shape[0]
54 | index_offset = numpy.arange(num_labels) * num_classes
55 | labels_one_hot = numpy.zeros((num_labels, num_classes))
56 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
57 | return labels_one_hot
58 |
59 |
60 | def extract_labels(filename, one_hot=False):
61 | """Extract the labels into a 1D uint8 numpy array [index]."""
62 | print('Extracting', filename)
63 | with gzip.open(filename) as bytestream:
64 | magic = _read32(bytestream)
65 | if magic != 2049:
66 | raise ValueError(
67 | 'Invalid magic number %d in MNIST label file: %s' %
68 | (magic, filename))
69 | num_items = _read32(bytestream)
70 | buf = bytestream.read(num_items)
71 | labels = numpy.frombuffer(buf, dtype=numpy.uint8)
72 | if one_hot:
73 | return dense_to_one_hot(labels)
74 | return labels
75 |
76 |
77 | class DataSet(object):
78 |
79 | def __init__(self, images, labels, fake_data=False):
80 | if fake_data:
81 | self._num_examples = 10000
82 | else:
83 | assert images.shape[0] == labels.shape[0], (
84 | "images.shape: %s labels.shape: %s" % (images.shape,
85 | labels.shape))
86 | self._num_examples = images.shape[0]
87 |
88 | # Convert shape from [num examples, rows, columns, depth]
89 | # to [num examples, rows*columns] (assuming depth == 1)
90 | assert images.shape[3] == 1
91 | images = images.reshape(images.shape[0],
92 | images.shape[1] * images.shape[2])
93 | # Convert from [0, 255] -> [0.0, 1.0].
94 | images = images.astype(numpy.float32)
95 | images = numpy.multiply(images, 1.0 / 255.0)
96 | self._images = images
97 | self._labels = labels
98 | self._epochs_completed = 0
99 | self._index_in_epoch = 0
100 |
101 | @property
102 | def images(self):
103 | return self._images
104 |
105 | @property
106 | def labels(self):
107 | return self._labels
108 |
109 | @property
110 | def num_examples(self):
111 | return self._num_examples
112 |
113 | @property
114 | def epochs_completed(self):
115 | return self._epochs_completed
116 |
117 | def next_batch(self, batch_size, fake_data=False):
118 | """Return the next `batch_size` examples from this data set."""
119 | if fake_data:
120 | fake_image = [1.0 for _ in xrange(784)]
121 | fake_label = 0
122 | return [fake_image for _ in xrange(batch_size)], [
123 | fake_label for _ in xrange(batch_size)]
124 | start = self._index_in_epoch
125 | self._index_in_epoch += batch_size
126 | if self._index_in_epoch > self._num_examples:
127 | # Finished epoch
128 | self._epochs_completed += 1
129 | # Shuffle the data
130 | perm = numpy.arange(self._num_examples)
131 | numpy.random.shuffle(perm)
132 | self._images = self._images[perm]
133 | self._labels = self._labels[perm]
134 | # Start next epoch
135 | start = 0
136 | self._index_in_epoch = batch_size
137 | assert batch_size <= self._num_examples
138 | end = self._index_in_epoch
139 | return self._images[start:end], self._labels[start:end]
140 |
141 |
142 | def read_data_sets(train_dir, fake_data=False, one_hot=False):
143 | class DataSets(object):
144 | pass
145 | data_sets = DataSets()
146 |
147 | if fake_data:
148 | data_sets.train = DataSet([], [], fake_data=True)
149 | data_sets.validation = DataSet([], [], fake_data=True)
150 | data_sets.test = DataSet([], [], fake_data=True)
151 | return data_sets
152 |
153 | TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
154 | TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
155 | TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
156 | TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
157 | VALIDATION_SIZE = 5000
158 |
159 | local_file = maybe_download(TRAIN_IMAGES, train_dir)
160 | train_images = extract_images(local_file)
161 |
162 | local_file = maybe_download(TRAIN_LABELS, train_dir)
163 | train_labels = extract_labels(local_file, one_hot=one_hot)
164 |
165 | local_file = maybe_download(TEST_IMAGES, train_dir)
166 | test_images = extract_images(local_file)
167 |
168 | local_file = maybe_download(TEST_LABELS, train_dir)
169 | test_labels = extract_labels(local_file, one_hot=one_hot)
170 |
171 | validation_images = train_images[:VALIDATION_SIZE]
172 | validation_labels = train_labels[:VALIDATION_SIZE]
173 | train_images = train_images[VALIDATION_SIZE:]
174 | train_labels = train_labels[VALIDATION_SIZE:]
175 |
176 | data_sets.train = DataSet(train_images, train_labels)
177 | data_sets.validation = DataSet(validation_images, validation_labels)
178 | data_sets.test = DataSet(test_images, test_labels)
179 |
180 | return data_sets
181 |
--------------------------------------------------------------------------------
/5.CNN/mycnn.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/5.CNN/mycnn.JPG
--------------------------------------------------------------------------------
/5.CNN/tf_board.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | from tensorflow.examples.tutorials.mnist import input_data
4 | import time
5 |
6 | batch_size = 125
7 | test_size = 200
8 | training_epochs = 20
9 |
10 | def init_weights(shape,tag):
11 | #return tf.Variable(tf.random_normal(shape, stddev=0.01))
12 | return tf.Variable(tf.truncated_normal(shape, stddev=0.1),name=tag)
13 |
14 | # Filter weight vectors (kernels): w, w2, w3, w4, w_o
15 | def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
16 |
17 | l1a = tf.nn.relu(tf.nn.conv2d(X, w, # l1a shape=(?, 28, 28, 32)
18 | strides=[1, 1, 1, 1], padding='SAME'))
19 | l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1], # l1 shape=(?, 14, 14, 32)
20 | strides=[1, 2, 2, 1], padding='SAME')
21 | l1 = tf.nn.dropout(l1, p_keep_conv)
22 |
23 |
24 | l2a = tf.nn.relu(tf.nn.conv2d(l1, w2, # l2a shape=(?, 14, 14, 64)
25 | strides=[1, 1, 1, 1], padding='SAME'))
26 | l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1], # l2 shape=(?, 7, 7, 64)
27 | strides=[1, 2, 2, 1], padding='SAME')
28 | l2 = tf.nn.dropout(l2, p_keep_conv)
29 |
30 |
31 | l3a = tf.nn.relu(tf.nn.conv2d(l2, w3, # l3a shape=(?, 7, 7, 128)
32 | strides=[1, 1, 1, 1], padding='SAME'))
33 | l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1], # l3 shape=(?, 4, 4, 128)
34 | strides=[1, 2, 2, 1], padding='SAME')
35 | l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]]) # reshape to (?, 2048)
36 | l3 = tf.nn.dropout(l3, p_keep_conv)
37 |
38 |
39 | l4 = tf.nn.relu(tf.matmul(l3, w4))
40 | l4 = tf.nn.dropout(l4, p_keep_hidden)
41 |
42 | pyx = tf.matmul(l4, w_o, name="y")
43 | return pyx
44 |
45 | # Read data
46 | mnist = input_data.read_data_sets("MNIST_DATA/", one_hot=True)
47 | #trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
48 |
49 | # trx.reshape( n-inputs, image size, image size, depth )
50 | # this variable is input in model()
51 | #trX = trX.reshape(-1, 28, 28, 1) # 28x28x1 input img
52 | #teX = teX.reshape(-1, 28, 28, 1) # 28x28x1 input img
53 |
54 | #X = tf.placeholder("float", [None, 28, 28, 1], name = 'X')
55 | X = tf.placeholder("float", shape=[None, 784], name='x') # None allows a variable batch size; 784 is the dimensionality of a flattened MNIST image.
56 | Y = tf.placeholder("float", [None, 10], name = 'y')
57 |
58 | # reshape input data
59 | x_image = tf.reshape(X, [-1,28,28,1], name="x_image")
60 |
61 | w = init_weights([3, 3, 1, 32],"W_conv1") # 3x3x1 conv, 32 outputs
62 | w2 = init_weights([3, 3, 32, 64],"W_conv2") # 3x3x32 conv, 64 outputs
63 | w3 = init_weights([3, 3, 64, 128],"W_conv3")       # 3x3x64 conv, 128 outputs
64 | w4 = init_weights([128 * 4 * 4, 625],"FC_1") # FC 128 * 4 * 4 inputs, 625 outputs
65 | w_o = init_weights([625, 10],"FC_2")         # FC 625 inputs, 10 outputs (labels)
66 |
67 | p_keep_conv = tf.placeholder("float")
68 | p_keep_hidden = tf.placeholder("float")
69 |
70 | py_x = model(x_image, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)
71 |
72 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y),name="cross_entropy")
73 | train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
74 | predict_op = tf.argmax(py_x, 1)
75 |
76 |
77 | # create summary of parameters
78 | tf.summary.histogram('W_conv1', w)
79 | tf.summary.histogram('W_conv2', w2)
80 | tf.summary.histogram('y', py_x)
81 | tf.summary.scalar('cross_entropy', cost)
82 | # init
83 | init = tf.global_variables_initializer()
84 |
85 | # Launch the graph in a session
86 | with tf.Session() as sess:
87 | #tensorboard
88 | merged = tf.summary.merge_all()
89 | summary_writer = tf.summary.FileWriter("/tmp/mnistCNN", sess.graph)
90 |
91 | # you need to initialize all variables
92 | start_time = time.time()
93 | sess.run(init)
94 |
95 | for i in range(training_epochs):
96 | avg_cost = 0.
97 | avg_training_accuracy = 0.
98 | total_batch = int(mnist.train.num_examples/batch_size)
99 |
100 | for step in range(total_batch):
101 | batch_xs, batch_ys = mnist.train.next_batch(batch_size)
102 | #batch_xs_image = batch_xs.reshape(-1, 28, 28, 1)
103 |
104 | sess.run(train_op, feed_dict={X: batch_xs, Y: batch_ys,
105 | p_keep_conv: 0.8, p_keep_hidden: 0.5})
106 |
107 |             # Compute the average training cost
108 | avg_cost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, p_keep_conv:1.0, p_keep_hidden:1.0})/total_batch
109 |
110 | avg_training_accuracy += (np.mean(np.argmax(batch_ys, axis=1) ==
111 | sess.run(predict_op, feed_dict={X: batch_xs,
112 | Y: batch_ys,
113 | p_keep_conv: 1.0,
114 | p_keep_hidden: 1.0})))/total_batch
115 |
116 |
117 | print("Epoch: %d, training error: %.4f, training accuracy: %.4f"%(i,avg_cost,avg_training_accuracy))
118 |         # Record the summary for TensorBoard.
119 | summary = sess.run(merged, feed_dict={X: batch_xs, Y: batch_ys, p_keep_conv:1.0, p_keep_hidden:1.0})
120 | summary_writer.add_summary(summary , i)
121 |
122 |
123 |         # Compute testing accuracy
124 |         # Shuffle the indices so that 200 test examples are sampled at random
125 |         test_indices = np.arange(mnist.test.labels.shape[0]) # Get A Test Batch
126 |         np.random.shuffle(test_indices)
127 |         test_indices = test_indices[0:test_size] # keep only test_size (200) of them
128 |
129 |         #teX = mnist.test.images[test_indices].reshape(-1, 28, 28, 1) # represent the input as a 3-D array of 2-D images
130 | teX = mnist.test.images[test_indices]
131 | testing_accuracy = np.mean(np.argmax(mnist.test.labels[test_indices], axis=1) ==
132 | sess.run(predict_op, feed_dict={X: teX,
133 | Y: mnist.test.labels[test_indices],
134 | p_keep_conv: 1.0,
135 | p_keep_hidden: 1.0}))
136 | print("Testing Accuracy: %.4f"%(testing_accuracy))
137 |
138 |         # Stop training early once accuracy reaches 1.0 on the 200 shuffled test samples.
139 | if testing_accuracy == 1.0:
140 | print("Early stop..")
141 | break
142 |
143 | print("time elapsed: {:.2f}s".format(time.time() - start_time))
144 |
--------------------------------------------------------------------------------
/6.Early Stop and Index Shuffling/EarlyStop.py:
--------------------------------------------------------------------------------
1 | # Early Stop
2 |
3 | import tensorflow as tf
4 | from tensorflow.contrib.learn.python.learn.datasets.base import load_iris
5 | import numpy as np
6 |
7 | tf.reset_default_graph()
8 |
9 |
10 | def MLP_iris():
11 | # load the iris data.
12 | iris = load_iris()
13 |
14 | np.random.seed(0)
15 | random_index = np.random.permutation(150)
16 |
17 | iris_data = iris.data[random_index]
18 | iris_target = iris.target[random_index]
19 | iris_target_onehot = np.zeros((150, 3))
20 | iris_target_onehot[np.arange(150), iris_target] = 1
21 |
22 | accuracy_list = []
23 |
24 | # build computation graph
25 | x = tf.placeholder("float", shape=[None, 4], name='x')
26 | y_target = tf.placeholder("float", shape=[None, 3], name='y_target')
27 |
28 | W1 = tf.Variable(tf.zeros([4, 128]), name='W1')
29 | b1 = tf.Variable(tf.zeros([128]), name='b1')
30 | h1 = tf.sigmoid(tf.matmul(x, W1) + b1, name='h1')
31 |
32 | W2 = tf.Variable(tf.zeros([128, 3]), name='W2')
33 | b2 = tf.Variable(tf.zeros([3]), name='b2')
34 | y = tf.nn.softmax(tf.matmul(h1, W2) + b2, name='y')
35 |
36 | cross_entropy = -tf.reduce_sum(y_target * tf.log(y), name='cross_entropy')
37 |
38 | train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
39 |
40 | correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_target, 1))
41 |
42 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
43 |
44 | sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
45 | sess.run(tf.global_variables_initializer())
46 |
47 | for i in range(500):
48 | sess.run(train_step, feed_dict={x: iris_data[0:100], y_target: iris_target_onehot[0:100]})
49 |
50 | train_accuracy = sess.run(accuracy, feed_dict={x: iris_data[0:100], y_target: iris_target_onehot[0:100]})
51 | validation_accuracy = sess.run(accuracy, feed_dict={x: iris_data[100:], y_target: iris_target_onehot[100:]})
52 | print (
53 | "step %d, training accuracy: %.3f / validation accuracy: %.3f" % (i, train_accuracy, validation_accuracy))
54 |
55 | accuracy_list.append(validation_accuracy)
56 |
57 |         # Early stopping: after 50 iterations, stop once the current validation
58 |         # accuracy is no more than 0.01 above the mean of the most recent half of
59 |         # the recorded validation accuracies, i.e. it has stopped improving.
60 |         if i >= 50:
61 |             if validation_accuracy - np.mean(accuracy_list[int(round(len(accuracy_list) / 2)):]) <= 0.01:
62 |                 break
63 |
64 |     sess.close()
65 |
66 |
67 | MLP_iris()
--------------------------------------------------------------------------------
/7.TensorBoard/MNIST_DATA/data/t10k-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/7.TensorBoard/MNIST_DATA/data/t10k-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/7.TensorBoard/MNIST_DATA/data/t10k-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/7.TensorBoard/MNIST_DATA/data/t10k-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/7.TensorBoard/MNIST_DATA/data/train-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/7.TensorBoard/MNIST_DATA/data/train-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/7.TensorBoard/MNIST_DATA/data/train-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/7.TensorBoard/MNIST_DATA/data/train-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/7.TensorBoard/TensorBoard.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {
7 | "collapsed": false
8 | },
9 | "outputs": [
10 | {
11 | "name": "stdout",
12 | "output_type": "stream",
13 | "text": [
14 | "WARNING:tensorflow:From :30 in .: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
15 | "Instructions for updating:\n",
16 | "Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.\n",
17 | "WARNING:tensorflow:From :39 in .: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
18 | "Instructions for updating:\n",
19 | "Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
20 | "WARNING:tensorflow:From :40 in .: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
21 | "Instructions for updating:\n",
22 | "Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
23 | "WARNING:tensorflow:From :42 in .: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
24 | "Instructions for updating:\n",
25 | "Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
26 | "WARNING:tensorflow:From :43 in .: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
27 | "Instructions for updating:\n",
28 | "Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
29 | "WARNING:tensorflow:From :45 in .: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
30 | "Instructions for updating:\n",
31 | "Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
32 | "WARNING:tensorflow:From :49 in .: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n",
33 | "Instructions for updating:\n",
34 | "Use `tf.global_variables_initializer` instead.\n",
35 | "WARNING:tensorflow:From :55 in .: merge_all_summaries (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
36 | "Instructions for updating:\n",
37 | "Please switch to tf.summary.merge_all.\n",
38 | "WARNING:tensorflow:From /home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/ops/logging_ops.py:264 in merge_all_summaries.: merge_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
39 | "Instructions for updating:\n",
40 | "Please switch to tf.summary.merge.\n",
41 | "WARNING:tensorflow:From :56 in .: SummaryWriter.__init__ (from tensorflow.python.training.summary_io) is deprecated and will be removed after 2016-11-30.\n",
42 | "Instructions for updating:\n",
43 | "Please switch to tf.summary.FileWriter. The interface and behavior is the same; this is just a rename.\n",
44 | "WARNING:tensorflow:Passing a `GraphDef` to the SummaryWriter is deprecated. Pass a `Graph` object instead, such as `sess.graph`.\n"
45 | ]
46 | },
47 | {
48 | "ename": "InvalidArgumentError",
49 | "evalue": "You must feed a value for placeholder tensor 'X-input' with dtype float\n\t [[Node: X-input = Placeholder[dtype=DT_FLOAT, shape=[], _device=\"/job:localhost/replica:0/task:0/gpu:0\"]()]]\n\t [[Node: Bias2_1/read/_15 = _Recv[client_terminated=false, recv_device=\"/job:localhost/replica:0/task:0/cpu:0\", send_device=\"/job:localhost/replica:0/task:0/gpu:0\", send_device_incarnation=1, tensor_name=\"edge_133_Bias2_1/read\", tensor_type=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"]()]]\n\nCaused by op 'X-input', defined at:\n File \"/usr/lib/python3.5/runpy.py\", line 184, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/usr/lib/python3.5/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/__main__.py\", line 3, in \n app.launch_new_instance()\n File \"/home/jemin/.local/lib/python3.5/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/kernelapp.py\", line 474, in start\n ioloop.IOLoop.instance().start()\n File \"/home/jemin/.local/lib/python3.5/site-packages/zmq/eventloop/ioloop.py\", line 177, in start\n super(ZMQIOLoop, self).start()\n File \"/home/jemin/.local/lib/python3.5/site-packages/tornado/ioloop.py\", line 887, in start\n handler_func(fd_obj, events)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tornado/stack_context.py\", line 275, in null_wrapper\n return fn(*args, **kwargs)\n File \"/home/jemin/.local/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 440, in _handle_events\n self._handle_recv()\n File \"/home/jemin/.local/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 472, in _handle_recv\n self._run_callback(callback, msg)\n File \"/home/jemin/.local/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 414, in _run_callback\n callback(*args, **kwargs)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tornado/stack_context.py\", line 275, in null_wrapper\n return fn(*args, **kwargs)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 276, in dispatcher\n return self.dispatch_shell(stream, msg)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 228, in dispatch_shell\n handler(stream, idents, msg)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 390, in execute_request\n user_expressions, allow_stdin)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/ipkernel.py\", line 196, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/zmqshell.py\", line 501, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/jemin/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2717, in run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/home/jemin/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2821, in run_ast_nodes\n if self.run_code(code, result):\n File \"/home/jemin/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2881, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 10, in \n X = tf.placeholder(tf.float32, name='X-input')\n File \"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/ops/array_ops.py\", 
line 1587, in placeholder\n name=name)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/ops/gen_array_ops.py\", line 2043, in _placeholder\n name=name)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py\", line 759, in apply_op\n op_def=op_def)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/framework/ops.py\", line 2240, in create_op\n original_op=self._default_original_op, op_def=op_def)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/framework/ops.py\", line 1128, in __init__\n self._traceback = _extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'X-input' with dtype float\n\t [[Node: X-input = Placeholder[dtype=DT_FLOAT, shape=[], _device=\"/job:localhost/replica:0/task:0/gpu:0\"]()]]\n\t [[Node: Bias2_1/read/_15 = _Recv[client_terminated=false, recv_device=\"/job:localhost/replica:0/task:0/cpu:0\", send_device=\"/job:localhost/replica:0/task:0/gpu:0\", send_device_incarnation=1, tensor_name=\"edge_133_Bias2_1/read\", tensor_type=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"]()]]\n",
50 | "traceback": [
51 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
52 | "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)",
53 | "\u001b[0;32m/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1020\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1021\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1022\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
54 | "\u001b[0;32m/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1002\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1003\u001b[0;31m status, run_metadata)\n\u001b[0m\u001b[1;32m 1004\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
55 | "\u001b[0;32m/usr/lib/python3.5/contextlib.py\u001b[0m in \u001b[0;36m__exit__\u001b[0;34m(self, type, value, traceback)\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 66\u001b[0;31m \u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 67\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
56 | "\u001b[0;32m/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/framework/errors_impl.py\u001b[0m in \u001b[0;36mraise_exception_on_not_ok_status\u001b[0;34m()\u001b[0m\n\u001b[1;32m 468\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpywrap_tensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_Message\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatus\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 469\u001b[0;31m pywrap_tensorflow.TF_GetCode(status))\n\u001b[0m\u001b[1;32m 470\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
57 | "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'X-input' with dtype float\n\t [[Node: X-input = Placeholder[dtype=DT_FLOAT, shape=[], _device=\"/job:localhost/replica:0/task:0/gpu:0\"]()]]\n\t [[Node: Bias2_1/read/_15 = _Recv[client_terminated=false, recv_device=\"/job:localhost/replica:0/task:0/cpu:0\", send_device=\"/job:localhost/replica:0/task:0/gpu:0\", send_device_incarnation=1, tensor_name=\"edge_133_Bias2_1/read\", tensor_type=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"]()]]",
58 | "\nDuring handling of the above exception, another exception occurred:\n",
59 | "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)",
60 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mx_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0my_data\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mstep\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;36m200\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m \u001b[0msummary\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmerged\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mx_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0my_data\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 64\u001b[0m \u001b[0mwriter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_summary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msummary\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstep\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0mprint\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcost\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mx_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0my_data\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mW1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mW2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
61 | "\u001b[0;32m/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 764\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 765\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 766\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 767\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 768\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
62 | "\u001b[0;32m/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 962\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 963\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m--> 964\u001b[0;31m feed_dict_string, options, run_metadata)\n\u001b[0m\u001b[1;32m 965\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 966\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
63 | "\u001b[0;32m/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1012\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1013\u001b[0m return self._do_call(_run_fn, self._session, feed_dict, fetch_list,\n\u001b[0;32m-> 1014\u001b[0;31m target_list, options, run_metadata)\n\u001b[0m\u001b[1;32m 1015\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1016\u001b[0m return self._do_call(_prun_fn, self._session, handle, feed_dict,\n",
64 | "\u001b[0;32m/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1032\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1033\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1034\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1035\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1036\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
65 | "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'X-input' with dtype float\n\t [[Node: X-input = Placeholder[dtype=DT_FLOAT, shape=[], _device=\"/job:localhost/replica:0/task:0/gpu:0\"]()]]\n\t [[Node: Bias2_1/read/_15 = _Recv[client_terminated=false, recv_device=\"/job:localhost/replica:0/task:0/cpu:0\", send_device=\"/job:localhost/replica:0/task:0/gpu:0\", send_device_incarnation=1, tensor_name=\"edge_133_Bias2_1/read\", tensor_type=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"]()]]\n\nCaused by op 'X-input', defined at:\n File \"/usr/lib/python3.5/runpy.py\", line 184, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/usr/lib/python3.5/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/__main__.py\", line 3, in \n app.launch_new_instance()\n File \"/home/jemin/.local/lib/python3.5/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n app.start()\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/kernelapp.py\", line 474, in start\n ioloop.IOLoop.instance().start()\n File \"/home/jemin/.local/lib/python3.5/site-packages/zmq/eventloop/ioloop.py\", line 177, in start\n super(ZMQIOLoop, self).start()\n File \"/home/jemin/.local/lib/python3.5/site-packages/tornado/ioloop.py\", line 887, in start\n handler_func(fd_obj, events)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tornado/stack_context.py\", line 275, in null_wrapper\n return fn(*args, **kwargs)\n File \"/home/jemin/.local/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 440, in _handle_events\n self._handle_recv()\n File \"/home/jemin/.local/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 472, in _handle_recv\n self._run_callback(callback, msg)\n File \"/home/jemin/.local/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py\", line 414, in _run_callback\n callback(*args, **kwargs)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tornado/stack_context.py\", line 275, in null_wrapper\n return fn(*args, **kwargs)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 276, in dispatcher\n return self.dispatch_shell(stream, msg)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 228, in dispatch_shell\n handler(stream, idents, msg)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/kernelbase.py\", line 390, in execute_request\n user_expressions, allow_stdin)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/ipkernel.py\", line 196, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/home/jemin/.local/lib/python3.5/site-packages/ipykernel/zmqshell.py\", line 501, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/home/jemin/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2717, in run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/home/jemin/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2821, in run_ast_nodes\n if self.run_code(code, result):\n File \"/home/jemin/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py\", line 2881, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 10, in \n X = tf.placeholder(tf.float32, name='X-input')\n File 
\"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/ops/array_ops.py\", line 1587, in placeholder\n name=name)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/ops/gen_array_ops.py\", line 2043, in _placeholder\n name=name)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py\", line 759, in apply_op\n op_def=op_def)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/framework/ops.py\", line 2240, in create_op\n original_op=self._default_original_op, op_def=op_def)\n File \"/home/jemin/.local/lib/python3.5/site-packages/tensorflow/python/framework/ops.py\", line 1128, in __init__\n self._traceback = _extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'X-input' with dtype float\n\t [[Node: X-input = Placeholder[dtype=DT_FLOAT, shape=[], _device=\"/job:localhost/replica:0/task:0/gpu:0\"]()]]\n\t [[Node: Bias2_1/read/_15 = _Recv[client_terminated=false, recv_device=\"/job:localhost/replica:0/task:0/cpu:0\", send_device=\"/job:localhost/replica:0/task:0/gpu:0\", send_device_incarnation=1, tensor_name=\"edge_133_Bias2_1/read\", tensor_type=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/cpu:0\"]()]]\n"
66 | ],
67 | "output_type": "error"
68 | }
69 | ],
70 | "source": [
71 | "import tensorflow as tf\n",
72 | "import numpy as np\n",
73 | "\n",
74 | "xy = np.loadtxt('XORtrain.txt', unpack=True)\n",
75 | "\n",
76 | "\n",
77 | "x_data = np.transpose(xy[0:-1])\n",
78 | "y_data = np.reshape(xy[-1], (4, 1))\n",
79 | "\n",
80 | "X = tf.placeholder(tf.float32, name='X-input')\n",
81 | "Y = tf.placeholder(tf.float32, name='Y-input')\n",
82 | "\n",
83 | "W1 = tf.Variable(tf.random_uniform([2, 2], -1.0, 1.0), name='Weight1')\n",
84 | "W2 = tf.Variable(tf.random_uniform([2, 1], -1.0, 1.0), name='Weight2')\n",
85 | "\n",
86 | "b1 = tf.Variable(tf.zeros([2]), name=\"Bias1\")\n",
87 | "b2 = tf.Variable(tf.zeros([1]), name=\"Bias2\")\n",
88 | "\n",
89 | "\n",
90 | "# Our hypothesis\n",
91 | "with tf.name_scope(\"layer2\") as scope:\n",
92 | " L2 = tf.sigmoid(tf.matmul(X, W1) + b1)\n",
93 | "\n",
94 | "with tf.name_scope(\"layer3\") as scope:\n",
95 | " hypothesis = tf.sigmoid(tf.matmul(L2, W2) + b2)\n",
96 | "\n",
97 | "# Cost function\n",
98 | "with tf.name_scope(\"cost\") as scope:\n",
99 | " cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))\n",
100 | " cost_summ = tf.scalar_summary(\"cost\", cost)\n",
101 | "\n",
102 | "# Minimize\n",
103 | "with tf.name_scope(\"train\") as scope:\n",
104 | " a = tf.Variable(0.1) # Learning rate, alpha\n",
105 | " optimizer = tf.train.GradientDescentOptimizer(a)\n",
106 | " train = optimizer.minimize(cost)\n",
107 | "\n",
108 | "# Add histogram\n",
109 | "w1_hist = tf.histogram_summary(\"weights1\", W1)\n",
110 | "w2_hist = tf.histogram_summary(\"weights2\", W2)\n",
111 | "\n",
112 | "b1_hist = tf.histogram_summary(\"biases1\", b1)\n",
113 | "b2_hist = tf.histogram_summary(\"biases2\", b2)\n",
114 | "\n",
115 | "y_hist = tf.histogram_summary(\"y\", Y)\n",
116 | "\n",
117 | "\n",
118 | "# Before starting, initialize the variables. We will `run` this first.\n",
119 | "init = tf.initialize_all_variables()\n",
120 | "\n",
121 | "\n",
122 | "# Launch the graph,\n",
123 | "with tf.Session() as sess:\n",
124 | " # tensorboard --logdir=./logs/xor_logs\n",
125 | " merged = tf.merge_all_summaries()\n",
126 | " writer = tf.train.SummaryWriter(\"./logs/xor_logs\", sess.graph_def)\n",
127 | "\n",
128 | " sess.run(init)\n",
129 | " # Fit the line.\n",
130 | " for step in range(20000):\n",
131 | " sess.run(train, feed_dict={X:x_data, Y:y_data})\n",
132 | " if step % 200 == 0:\n",
133 | " summary = sess.run(merged, feed_dict={X:x_data, Y:y_data})\n",
134 | " writer.add_summary(summary, step)\n",
135 | " print (step, sess.run(cost, feed_dict={X:x_data, Y:y_data}), sess.run(W1), sess.run(W2))\n",
136 | "\n",
137 | " # Test model\n",
138 | " correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y)\n",
139 | " # Calculate accuracy\n",
140 | " accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n",
141 | " print (sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict={X:x_data, Y:y_data}))\n",
142 | " print (\"Accuracy:\", accuracy.eval({X:x_data, Y:y_data}))"
143 | ]
144 | },
145 | {
146 | "cell_type": "code",
147 | "execution_count": null,
148 | "metadata": {
149 | "collapsed": true
150 | },
151 | "outputs": [],
152 | "source": [
153 | ""
154 | ]
155 | }
156 | ],
157 | "metadata": {
158 | "kernelspec": {
159 | "display_name": "Python 3",
160 | "language": "python",
161 | "name": "python3"
162 | },
163 | "language_info": {
164 | "codemirror_mode": {
165 | "name": "ipython",
166 | "version": 3.0
167 | },
168 | "file_extension": ".py",
169 | "mimetype": "text/x-python",
170 | "name": "python",
171 | "nbconvert_exporter": "python",
172 | "pygments_lexer": "ipython3",
173 | "version": "3.5.2"
174 | }
175 | },
176 | "nbformat": 4,
177 | "nbformat_minor": 0
178 | }
--------------------------------------------------------------------------------
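Note on the saved error output above: the InvalidArgumentError ("You must feed a value for placeholder tensor 'X-input'"), together with the auto-renamed node Bias2_1 in the traceback, is the usual symptom of executing the notebook cell more than once in the same kernel. Each run adds a fresh copy of the placeholders and summary ops to the default graph, and merging all summaries then picks up ops that refer to the stale 'X-input' placeholder, which the new feed_dict never feeds. A minimal guard, assuming TF 1.x graph mode, is to clear the default graph at the top of the cell before rebuilding it:

import tensorflow as tf

# Drop ops left over from a previous execution of this cell so that the merged
# summaries only refer to the placeholders built (and fed) below.
tf.reset_default_graph()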
/7.TensorBoard/TensorBoard.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 |
4 | xy = np.loadtxt('XORtrain.txt', unpack=True)
5 |
6 |
7 | x_data = np.transpose(xy[0:-1])
8 | y_data = np.reshape(xy[-1], (4, 1))
9 |
10 | X = tf.placeholder(tf.float32, name='X-input')
11 | Y = tf.placeholder(tf.float32, name='Y-input')
12 |
13 | W1 = tf.Variable(tf.random_uniform([2, 2], -1.0, 1.0), name='Weight1')
14 | W2 = tf.Variable(tf.random_uniform([2, 1], -1.0, 1.0), name='Weight2')
15 |
16 | b1 = tf.Variable(tf.zeros([2]), name="Bias1")
17 | b2 = tf.Variable(tf.zeros([1]), name="Bias2")
18 |
19 |
20 | # Our hypothesis
21 | with tf.name_scope("layer2") as scope:
22 | L2 = tf.sigmoid(tf.matmul(X, W1) + b1)
23 |
24 | with tf.name_scope("layer3") as scope:
25 | hypothesis = tf.sigmoid(tf.matmul(L2, W2) + b2)
26 |
27 | # Cost function
28 | with tf.name_scope("cost") as scope:
29 | cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))
30 | cost_summ = tf.summary.scalar("cost", cost)
31 |
32 | # Minimize
33 | with tf.name_scope("train") as scope:
34 | a = tf.Variable(0.1) # Learning rate, alpha
35 | optimizer = tf.train.GradientDescentOptimizer(a)
36 | train = optimizer.minimize(cost)
37 |
38 | # Add histogram
39 | w1_hist = tf.summary.histogram("weights1", W1)
40 | w2_hist = tf.summary.histogram("weights2", W2)
41 |
42 | b1_hist = tf.summary.histogram("biases1", b1)
43 | b2_hist = tf.summary.histogram("biases2", b2)
44 |
45 | y_hist = tf.summary.histogram("y", Y)
46 |
47 |
48 | # Before starting, initialize the variables. We will `run` this first.
49 | init = tf.global_variables_initializer()
50 |
51 |
52 | # Launch the graph,
53 | with tf.Session() as sess:
54 | # tensorboard --logdir=./logs/xor_logs
55 | merged = tf.summary.merge_all()
56 |     writer = tf.summary.FileWriter("./logs/xor_logs", sess.graph)
57 |
58 | sess.run(init)
59 | # Fit the line.
60 |     for step in range(20000):
61 | sess.run(train, feed_dict={X:x_data, Y:y_data})
62 | if step % 200 == 0:
63 | summary = sess.run(merged, feed_dict={X:x_data, Y:y_data})
64 | writer.add_summary(summary, step)
65 |             print(step, sess.run(cost, feed_dict={X:x_data, Y:y_data}), sess.run(W1), sess.run(W2))
66 |
67 | # Test model
68 | correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y)
69 | # Calculate accuracy
70 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
71 |     print(sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction, accuracy], feed_dict={X:x_data, Y:y_data}))
72 |     print("Accuracy:", accuracy.eval({X:x_data, Y:y_data}))
--------------------------------------------------------------------------------
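Besides launching TensorBoard on the log directory (tensorboard --logdir=./logs/xor_logs), the event files written by TensorBoard.py can be read back directly. A minimal sketch, assuming a TF 1.x installation where tf.train.summary_iterator is available:

import glob
import tensorflow as tf

# Print every scalar summary (step, tag, value) stored in the XOR run's event files.
for event_file in sorted(glob.glob("./logs/xor_logs/events.out.tfevents.*")):
    for event in tf.train.summary_iterator(event_file):
        for value in event.summary.value:
            if value.HasField("simple_value"):
                print(event.step, value.tag, value.simple_value)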
/7.TensorBoard/XORtrain.txt:
--------------------------------------------------------------------------------
1 | 0 0 0
2 | 0 1 1
3 | 1 0 1
4 | 1 1 0
5 |
6 |
--------------------------------------------------------------------------------
/7.TensorBoard/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/7.TensorBoard/__init__.py
--------------------------------------------------------------------------------
/7.TensorBoard/labels_1024.tsv:
--------------------------------------------------------------------------------
1 | 7
2 | 2
3 | 1
4 | 0
5 | 4
6 | 1
7 | 4
8 | 9
9 | 5
10 | 9
11 | 0
12 | 6
13 | 9
14 | 0
15 | 1
16 | 5
17 | 9
18 | 7
19 | 3
20 | 4
21 | 9
22 | 6
23 | 6
24 | 5
25 | 4
26 | 0
27 | 7
28 | 4
29 | 0
30 | 1
31 | 3
32 | 1
33 | 3
34 | 4
35 | 7
36 | 2
37 | 7
38 | 1
39 | 2
40 | 1
41 | 1
42 | 7
43 | 4
44 | 2
45 | 3
46 | 5
47 | 1
48 | 2
49 | 4
50 | 4
51 | 6
52 | 3
53 | 5
54 | 5
55 | 6
56 | 0
57 | 4
58 | 1
59 | 9
60 | 5
61 | 7
62 | 8
63 | 9
64 | 3
65 | 7
66 | 4
67 | 6
68 | 4
69 | 3
70 | 0
71 | 7
72 | 0
73 | 2
74 | 9
75 | 1
76 | 7
77 | 3
78 | 2
79 | 9
80 | 7
81 | 7
82 | 6
83 | 2
84 | 7
85 | 8
86 | 4
87 | 7
88 | 3
89 | 6
90 | 1
91 | 3
92 | 6
93 | 9
94 | 3
95 | 1
96 | 4
97 | 1
98 | 7
99 | 6
100 | 9
101 | 6
102 | 0
103 | 5
104 | 4
105 | 9
106 | 9
107 | 2
108 | 1
109 | 9
110 | 4
111 | 8
112 | 7
113 | 3
114 | 9
115 | 7
116 | 4
117 | 4
118 | 4
119 | 9
120 | 2
121 | 5
122 | 4
123 | 7
124 | 6
125 | 7
126 | 9
127 | 0
128 | 5
129 | 8
130 | 5
131 | 6
132 | 6
133 | 5
134 | 7
135 | 8
136 | 1
137 | 0
138 | 1
139 | 6
140 | 4
141 | 6
142 | 7
143 | 3
144 | 1
145 | 7
146 | 1
147 | 8
148 | 2
149 | 0
150 | 2
151 | 9
152 | 9
153 | 5
154 | 5
155 | 1
156 | 5
157 | 6
158 | 0
159 | 3
160 | 4
161 | 4
162 | 6
163 | 5
164 | 4
165 | 6
166 | 5
167 | 4
168 | 5
169 | 1
170 | 4
171 | 4
172 | 7
173 | 2
174 | 3
175 | 2
176 | 7
177 | 1
178 | 8
179 | 1
180 | 8
181 | 1
182 | 8
183 | 5
184 | 0
185 | 8
186 | 9
187 | 2
188 | 5
189 | 0
190 | 1
191 | 1
192 | 1
193 | 0
194 | 9
195 | 0
196 | 3
197 | 1
198 | 6
199 | 4
200 | 2
201 | 3
202 | 6
203 | 1
204 | 1
205 | 1
206 | 3
207 | 9
208 | 5
209 | 2
210 | 9
211 | 4
212 | 5
213 | 9
214 | 3
215 | 9
216 | 0
217 | 3
218 | 6
219 | 5
220 | 5
221 | 7
222 | 2
223 | 2
224 | 7
225 | 1
226 | 2
227 | 8
228 | 4
229 | 1
230 | 7
231 | 3
232 | 3
233 | 8
234 | 8
235 | 7
236 | 9
237 | 2
238 | 2
239 | 4
240 | 1
241 | 5
242 | 9
243 | 8
244 | 7
245 | 2
246 | 3
247 | 0
248 | 4
249 | 4
250 | 2
251 | 4
252 | 1
253 | 9
254 | 5
255 | 7
256 | 7
257 | 2
258 | 8
259 | 2
260 | 6
261 | 8
262 | 5
263 | 7
264 | 7
265 | 9
266 | 1
267 | 8
268 | 1
269 | 8
270 | 0
271 | 3
272 | 0
273 | 1
274 | 9
275 | 9
276 | 4
277 | 1
278 | 8
279 | 2
280 | 1
281 | 2
282 | 9
283 | 7
284 | 5
285 | 9
286 | 2
287 | 6
288 | 4
289 | 1
290 | 5
291 | 8
292 | 2
293 | 9
294 | 2
295 | 0
296 | 4
297 | 0
298 | 0
299 | 2
300 | 8
301 | 4
302 | 7
303 | 1
304 | 2
305 | 4
306 | 0
307 | 2
308 | 7
309 | 4
310 | 3
311 | 3
312 | 0
313 | 0
314 | 3
315 | 1
316 | 9
317 | 6
318 | 5
319 | 2
320 | 5
321 | 9
322 | 2
323 | 9
324 | 3
325 | 0
326 | 4
327 | 2
328 | 0
329 | 7
330 | 1
331 | 1
332 | 2
333 | 1
334 | 5
335 | 3
336 | 3
337 | 9
338 | 7
339 | 8
340 | 6
341 | 5
342 | 6
343 | 1
344 | 3
345 | 8
346 | 1
347 | 0
348 | 5
349 | 1
350 | 3
351 | 1
352 | 5
353 | 5
354 | 6
355 | 1
356 | 8
357 | 5
358 | 1
359 | 7
360 | 9
361 | 4
362 | 6
363 | 2
364 | 2
365 | 5
366 | 0
367 | 6
368 | 5
369 | 6
370 | 3
371 | 7
372 | 2
373 | 0
374 | 8
375 | 8
376 | 5
377 | 4
378 | 1
379 | 1
380 | 4
381 | 0
382 | 3
383 | 3
384 | 7
385 | 6
386 | 1
387 | 6
388 | 2
389 | 1
390 | 9
391 | 2
392 | 8
393 | 6
394 | 1
395 | 9
396 | 5
397 | 2
398 | 5
399 | 4
400 | 4
401 | 2
402 | 8
403 | 3
404 | 8
405 | 2
406 | 4
407 | 5
408 | 0
409 | 3
410 | 1
411 | 7
412 | 7
413 | 5
414 | 7
415 | 9
416 | 7
417 | 1
418 | 9
419 | 2
420 | 1
421 | 4
422 | 2
423 | 9
424 | 2
425 | 0
426 | 4
427 | 9
428 | 1
429 | 4
430 | 8
431 | 1
432 | 8
433 | 4
434 | 5
435 | 9
436 | 8
437 | 8
438 | 3
439 | 7
440 | 6
441 | 0
442 | 0
443 | 3
444 | 0
445 | 2
446 | 6
447 | 6
448 | 4
449 | 9
450 | 3
451 | 3
452 | 3
453 | 2
454 | 3
455 | 9
456 | 1
457 | 2
458 | 6
459 | 8
460 | 0
461 | 5
462 | 6
463 | 6
464 | 6
465 | 3
466 | 8
467 | 8
468 | 2
469 | 7
470 | 5
471 | 8
472 | 9
473 | 6
474 | 1
475 | 8
476 | 4
477 | 1
478 | 2
479 | 5
480 | 9
481 | 1
482 | 9
483 | 7
484 | 5
485 | 4
486 | 0
487 | 8
488 | 9
489 | 9
490 | 1
491 | 0
492 | 5
493 | 2
494 | 3
495 | 7
496 | 8
497 | 9
498 | 4
499 | 0
500 | 6
501 | 3
502 | 9
503 | 5
504 | 2
505 | 1
506 | 3
507 | 1
508 | 3
509 | 6
510 | 5
511 | 7
512 | 4
513 | 2
514 | 2
515 | 6
516 | 3
517 | 2
518 | 6
519 | 5
520 | 4
521 | 8
522 | 9
523 | 7
524 | 1
525 | 3
526 | 0
527 | 3
528 | 8
529 | 3
530 | 1
531 | 9
532 | 3
533 | 4
534 | 4
535 | 6
536 | 4
537 | 2
538 | 1
539 | 8
540 | 2
541 | 5
542 | 4
543 | 8
544 | 8
545 | 4
546 | 0
547 | 0
548 | 2
549 | 3
550 | 2
551 | 7
552 | 7
553 | 0
554 | 8
555 | 7
556 | 4
557 | 4
558 | 7
559 | 9
560 | 6
561 | 9
562 | 0
563 | 9
564 | 8
565 | 0
566 | 4
567 | 6
568 | 0
569 | 6
570 | 3
571 | 5
572 | 4
573 | 8
574 | 3
575 | 3
576 | 9
577 | 3
578 | 3
579 | 3
580 | 7
581 | 8
582 | 0
583 | 8
584 | 2
585 | 1
586 | 7
587 | 0
588 | 6
589 | 5
590 | 4
591 | 3
592 | 8
593 | 0
594 | 9
595 | 6
596 | 3
597 | 8
598 | 0
599 | 9
600 | 9
601 | 6
602 | 8
603 | 6
604 | 8
605 | 5
606 | 7
607 | 8
608 | 6
609 | 0
610 | 2
611 | 4
612 | 0
613 | 2
614 | 2
615 | 3
616 | 1
617 | 9
618 | 7
619 | 5
620 | 1
621 | 0
622 | 8
623 | 4
624 | 6
625 | 2
626 | 6
627 | 7
628 | 9
629 | 3
630 | 2
631 | 9
632 | 8
633 | 2
634 | 2
635 | 9
636 | 2
637 | 7
638 | 3
639 | 5
640 | 9
641 | 1
642 | 8
643 | 0
644 | 2
645 | 0
646 | 5
647 | 2
648 | 1
649 | 3
650 | 7
651 | 6
652 | 7
653 | 1
654 | 2
655 | 5
656 | 8
657 | 0
658 | 3
659 | 7
660 | 2
661 | 4
662 | 0
663 | 9
664 | 1
665 | 8
666 | 6
667 | 7
668 | 7
669 | 4
670 | 3
671 | 4
672 | 9
673 | 1
674 | 9
675 | 5
676 | 1
677 | 7
678 | 3
679 | 9
680 | 7
681 | 6
682 | 9
683 | 1
684 | 3
685 | 7
686 | 8
687 | 3
688 | 3
689 | 6
690 | 7
691 | 2
692 | 8
693 | 5
694 | 8
695 | 5
696 | 1
697 | 1
698 | 4
699 | 4
700 | 3
701 | 1
702 | 0
703 | 7
704 | 7
705 | 0
706 | 7
707 | 9
708 | 4
709 | 4
710 | 8
711 | 5
712 | 5
713 | 4
714 | 0
715 | 8
716 | 2
717 | 1
718 | 0
719 | 8
720 | 4
721 | 5
722 | 0
723 | 4
724 | 0
725 | 6
726 | 1
727 | 7
728 | 3
729 | 2
730 | 6
731 | 7
732 | 2
733 | 6
734 | 9
735 | 3
736 | 1
737 | 4
738 | 6
739 | 2
740 | 5
741 | 4
742 | 2
743 | 0
744 | 6
745 | 2
746 | 1
747 | 7
748 | 3
749 | 4
750 | 1
751 | 0
752 | 5
753 | 4
754 | 3
755 | 1
756 | 1
757 | 7
758 | 4
759 | 9
760 | 9
761 | 4
762 | 8
763 | 4
764 | 0
765 | 2
766 | 4
767 | 5
768 | 1
769 | 1
770 | 6
771 | 4
772 | 7
773 | 1
774 | 9
775 | 4
776 | 2
777 | 4
778 | 1
779 | 5
780 | 5
781 | 3
782 | 8
783 | 3
784 | 1
785 | 4
786 | 5
787 | 6
788 | 8
789 | 9
790 | 4
791 | 1
792 | 5
793 | 3
794 | 8
795 | 0
796 | 3
797 | 2
798 | 5
799 | 1
800 | 2
801 | 8
802 | 3
803 | 4
804 | 4
805 | 0
806 | 8
807 | 8
808 | 3
809 | 3
810 | 1
811 | 7
812 | 3
813 | 5
814 | 9
815 | 6
816 | 3
817 | 2
818 | 6
819 | 1
820 | 3
821 | 6
822 | 0
823 | 7
824 | 2
825 | 1
826 | 7
827 | 1
828 | 4
829 | 2
830 | 4
831 | 2
832 | 1
833 | 7
834 | 9
835 | 6
836 | 1
837 | 1
838 | 2
839 | 4
840 | 8
841 | 1
842 | 7
843 | 7
844 | 4
845 | 8
846 | 0
847 | 7
848 | 3
849 | 1
850 | 3
851 | 1
852 | 0
853 | 7
854 | 7
855 | 0
856 | 3
857 | 5
858 | 5
859 | 2
860 | 7
861 | 6
862 | 6
863 | 9
864 | 2
865 | 8
866 | 3
867 | 5
868 | 2
869 | 2
870 | 5
871 | 6
872 | 0
873 | 8
874 | 2
875 | 9
876 | 2
877 | 8
878 | 8
879 | 8
880 | 8
881 | 7
882 | 4
883 | 9
884 | 3
885 | 0
886 | 6
887 | 6
888 | 3
889 | 2
890 | 1
891 | 3
892 | 2
893 | 2
894 | 9
895 | 3
896 | 0
897 | 0
898 | 5
899 | 7
900 | 8
901 | 1
902 | 4
903 | 4
904 | 6
905 | 0
906 | 2
907 | 9
908 | 1
909 | 4
910 | 7
911 | 4
912 | 7
913 | 3
914 | 9
915 | 8
916 | 8
917 | 4
918 | 7
919 | 1
920 | 2
921 | 1
922 | 2
923 | 2
924 | 3
925 | 2
926 | 3
927 | 2
928 | 3
929 | 9
930 | 1
931 | 7
932 | 4
933 | 0
934 | 3
935 | 5
936 | 5
937 | 8
938 | 6
939 | 3
940 | 2
941 | 6
942 | 7
943 | 6
944 | 6
945 | 3
946 | 2
947 | 7
948 | 8
949 | 1
950 | 1
951 | 7
952 | 5
953 | 6
954 | 4
955 | 9
956 | 5
957 | 1
958 | 3
959 | 3
960 | 4
961 | 7
962 | 8
963 | 9
964 | 1
965 | 1
966 | 6
967 | 9
968 | 1
969 | 4
970 | 4
971 | 5
972 | 4
973 | 0
974 | 6
975 | 2
976 | 2
977 | 3
978 | 1
979 | 5
980 | 1
981 | 2
982 | 0
983 | 3
984 | 8
985 | 1
986 | 2
987 | 6
988 | 7
989 | 1
990 | 6
991 | 2
992 | 3
993 | 9
994 | 0
995 | 1
996 | 2
997 | 2
998 | 0
999 | 8
1000 | 9
1001 | 9
1002 | 0
1003 | 2
1004 | 5
1005 | 1
1006 | 9
1007 | 7
1008 | 8
1009 | 1
1010 | 0
1011 | 4
1012 | 1
1013 | 7
1014 | 9
1015 | 6
1016 | 4
1017 | 2
1018 | 6
1019 | 8
1020 | 1
1021 | 3
1022 | 7
1023 | 5
1024 | 4
1025 |
--------------------------------------------------------------------------------
/7.TensorBoard/logs/xor_logs/events.out.tfevents.1486346805.jemin-desktop:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/7.TensorBoard/logs/xor_logs/events.out.tfevents.1486346805.jemin-desktop
--------------------------------------------------------------------------------
/7.TensorBoard/logs/xor_logs/events.out.tfevents.1486346813.jemin-desktop:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/7.TensorBoard/logs/xor_logs/events.out.tfevents.1486346813.jemin-desktop
--------------------------------------------------------------------------------
/7.TensorBoard/logs/xor_logs/events.out.tfevents.1486346816.jemin-desktop:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/leejaymin/TensorFlowLecture/420e15f79fdc357790c8c50b6e3db82f1e74dcba/7.TensorBoard/logs/xor_logs/events.out.tfevents.1486346816.jemin-desktop
--------------------------------------------------------------------------------
/7.TensorBoard/mnist.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 Google, Inc. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | import os
16 | from urllib.request import urlretrieve
17 | import tensorflow as tf
18 | #import urllib
19 |
20 | LOGDIR = '/tmp/mnist_tutorial/'
21 | GIST_URL = 'https://gist.githubusercontent.com/dandelionmane/4f02ab8f1451e276fea1f165a20336f1/raw/dfb8ee95b010480d56a73f324aca480b3820c180/'
22 |
23 | ### MNIST EMBEDDINGS ###
24 | mnist = tf.contrib.learn.datasets.mnist.read_data_sets(train_dir=LOGDIR + 'data', one_hot=True)
25 | ### Get a sprite and labels file for the embedding projector ###
26 | urlretrieve(GIST_URL + 'labels_1024.tsv', LOGDIR + 'labels_1024.tsv')
27 | urlretrieve(GIST_URL + 'sprite_1024.png', LOGDIR + 'sprite_1024.png')
28 |
29 | def conv_layer(input, size_in, size_out, name="conv"):
30 | with tf.name_scope(name):
31 | w = tf.Variable(tf.truncated_normal([5, 5, size_in, size_out], stddev=0.1), name="W")
32 | b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
33 | conv = tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding="SAME")
34 | act = tf.nn.relu(conv + b)
35 | tf.summary.histogram("weights", w)
36 | tf.summary.histogram("biases", b)
37 | tf.summary.histogram("activations", act)
38 | return tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
39 |
40 |
41 | def fc_layer(input, size_in, size_out, name="fc"):
42 | with tf.name_scope(name):
43 | w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1), name="W")
44 | b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
45 | act = tf.nn.relu(tf.matmul(input, w) + b)
46 | tf.summary.histogram("weights", w)
47 | tf.summary.histogram("biases", b)
48 | tf.summary.histogram("activations", act)
49 | return act
50 |
51 |
52 | def mnist_model(learning_rate, use_two_conv, use_two_fc, hparam):
53 | tf.reset_default_graph()
54 | sess = tf.Session()
55 |
56 | # Setup placeholders, and reshape the data
57 | x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
58 | x_image = tf.reshape(x, [-1, 28, 28, 1])
59 | tf.summary.image('input', x_image, 3)
60 | y = tf.placeholder(tf.float32, shape=[None, 10], name="labels")
61 |
62 | if use_two_conv:
63 | conv1 = conv_layer(x_image, 1, 32, "conv1")
64 | conv_out = conv_layer(conv1, 32, 64, "conv2")
65 | else:
66 | conv1 = conv_layer(x_image, 1, 64, "conv")
67 | conv_out = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
68 |
69 | flattened = tf.reshape(conv_out, [-1, 7 * 7 * 64])
70 |
71 |
72 | if use_two_fc:
73 | fc1 = fc_layer(flattened, 7 * 7 * 64, 1024, "fc1")
74 | embedding_input = fc1
75 | embedding_size = 1024
76 | logits = fc_layer(fc1, 1024, 10, "fc2")
77 | else:
78 | embedding_input = flattened
79 | embedding_size = 7*7*64
80 | logits = fc_layer(flattened, 7*7*64, 10, "fc")
81 |
82 | with tf.name_scope("xent"):
83 | xent = tf.reduce_mean(
84 | tf.nn.softmax_cross_entropy_with_logits(
85 | logits=logits, labels=y), name="xent")
86 | tf.summary.scalar("xent", xent)
87 |
88 | with tf.name_scope("train"):
89 | train_step = tf.train.AdamOptimizer(learning_rate).minimize(xent)
90 |
91 | with tf.name_scope("accuracy"):
92 | correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
93 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
94 | tf.summary.scalar("accuracy", accuracy)
95 |
96 | summ = tf.summary.merge_all()
97 |
98 |
99 | embedding = tf.Variable(tf.zeros([1024, embedding_size]), name="test_embedding")
100 | assignment = embedding.assign(embedding_input)
101 | saver = tf.train.Saver()
102 |
103 | sess.run(tf.global_variables_initializer())
104 | writer = tf.summary.FileWriter(LOGDIR + hparam)
105 | writer.add_graph(sess.graph)
106 |
107 | config = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()
108 | embedding_config = config.embeddings.add()
109 | embedding_config.tensor_name = embedding.name
110 | embedding_config.sprite.image_path = LOGDIR + 'sprite_1024.png'
111 | embedding_config.metadata_path = LOGDIR + 'labels_1024.tsv'
112 | # Specify the width and height of a single thumbnail.
113 | embedding_config.sprite.single_image_dim.extend([28, 28])
114 | tf.contrib.tensorboard.plugins.projector.visualize_embeddings(writer, config)
115 |
116 | for i in range(2001):
117 | batch = mnist.train.next_batch(100)
118 | if i % 5 == 0:
119 | [train_accuracy, s] = sess.run([accuracy, summ], feed_dict={x: batch[0], y: batch[1]})
120 | writer.add_summary(s, i)
121 | if i % 500 == 0:
122 | sess.run(assignment, feed_dict={x: mnist.test.images[:1024], y: mnist.test.labels[:1024]})
123 | saver.save(sess, os.path.join(LOGDIR, "model.ckpt"), i)
124 | sess.run(train_step, feed_dict={x: batch[0], y: batch[1]})
125 |
126 | def make_hparam_string(learning_rate, use_two_fc, use_two_conv):
127 | conv_param = "conv=2" if use_two_conv else "conv=1"
128 | fc_param = "fc=2" if use_two_fc else "fc=1"
129 | return "lr_%.0E,%s,%s" % (learning_rate, conv_param, fc_param)
130 |
131 | def main():
132 | # You can try adding some more learning rates
133 | for learning_rate in [1E-3, 1E-4, 1E-5]:
134 | # Include "False" as a value to try different model architectures
135 | for use_two_fc in [True, False]:
136 | for use_two_conv in [True, False]:
137 |                 # Construct a hyperparameter string for each run (example: "lr_1E-03,conv=2,fc=2")
138 | hparam = make_hparam_string(learning_rate, use_two_fc, use_two_conv)
139 | print('Starting run for %s' % hparam)
140 |
141 | # Actually run with the new settings
142 |                 mnist_model(learning_rate, use_two_conv, use_two_fc, hparam)
143 |
144 |
145 | if __name__ == '__main__':
146 | main()
147 |
--------------------------------------------------------------------------------
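Each run of mnist_model() above writes into a sub-directory of /tmp/mnist_tutorial/ named by make_hparam_string(), so the twelve runs can be compared side by side with tensorboard --logdir=/tmp/mnist_tutorial. A small sketch of the run names to expect (importing mnist.py as-is also executes the dataset download at the top of the module, so copying the function into a scratch script may be more convenient):

from mnist import make_hparam_string  # note: importing mnist also runs its module-level data download

for learning_rate in [1E-3, 1E-4, 1E-5]:
    for use_two_fc in [True, False]:
        for use_two_conv in [True, False]:
            print(make_hparam_string(learning_rate, use_two_fc, use_two_conv))
# First few names: lr_1E-03,conv=2,fc=2   lr_1E-03,conv=1,fc=2   lr_1E-03,conv=2,fc=1 ...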
/8.Save and Restore/Save and Restore.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# TensorFlow 모델 저장 복구 튜토리얼"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "- 어떻게 모델은 보이는가\n",
15 | "- 어떻게 모델은 저장 되는가\n",
16 | "- prediction과 transfer learning을 위한 restoe 방법\n",
17 | "- pre-trained model을 불러오는 방법 fine-tuning을 위해서"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 4,
23 | "metadata": {
24 | "scrolled": true
25 | },
26 | "outputs": [
27 | {
28 | "name": "stdout",
29 | "output_type": "stream",
30 | "text": [
31 | "24.0\n"
32 | ]
33 | },
34 | {
35 | "data": {
36 | "text/plain": [
37 | "'./my_test_model-1000'"
38 | ]
39 | },
40 | "execution_count": 4,
41 | "metadata": {},
42 | "output_type": "execute_result"
43 | }
44 | ],
45 | "source": [
46 | "import tensorflow as tf\n",
47 | "\n",
48 | "# Prepare to feed input, i.e. feed_dict and placeholders\n",
49 | "w1 = tf.placeholder(tf.float32, name=\"w1\")\n",
50 | "w2 = tf.placeholder(tf.float32, name=\"w2\")\n",
51 | "b1 = tf.Variable(2.0,dtype=tf.float32, name=\"bias\")\n",
52 | "feed_dict = {'w1': 4.0, 'w2': 8.0}\n",
53 | "\n",
54 | "# Define a test operation that we will restore\n",
55 | "w3 = w1 + w2\n",
56 | "w4 = tf.multiply(w3, b1, name=\"op_to_restore\")\n",
57 | "sess = tf.Session()\n",
58 | "sess.run(tf.global_variables_initializer())\n",
59 | "\n",
60 | "# Create a saver object which will save all the variables\n",
61 | "saver = tf.train.Saver()\n",
62 | "\n",
63 | "# Run the operation by feeding input\n",
64 | "result = sess.run(w4, {w1:feed_dict['w1'], w2:feed_dict['w2']})\n",
65 | "print(result)\n",
66 | "# Prints 24 which is sum of (w1+w2)*b1\n",
67 | "\n",
68 | "# Now, save the graph\n",
69 | "saver.save(sess, './my_test_model', global_step=1000)\n"
70 | ]
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "metadata": {
75 | "collapsed": true
76 | },
77 | "source": [
78 | "복구를 위해서는\n",
79 | "- graph \n",
80 | "- variable\n",
81 | "\n",
82 | "모두를 restore 해야 한다. \n",
83 | "이것을 통해서 새로운 training data를 `feed`할 수 있다. \n",
84 | "`graph.get_tensor_by_name()` method를 통해서 saved operation과 placeholder variables의 reference를 얻을 수 있다. "
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "metadata": {},
90 | "source": [
91 | "만약 같은 네트워크에 다른 data를 넣어서 처리하고 싶다면 간단하게 feed_dict을 네트워크에 넣는다."
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": 8,
97 | "metadata": {},
98 | "outputs": [
99 | {
100 | "name": "stdout",
101 | "output_type": "stream",
102 | "text": [
103 | "INFO:tensorflow:Restoring parameters from ./my_test_model-1000\n",
104 | "60.0\n"
105 | ]
106 | }
107 | ],
108 | "source": [
109 | "import tensorflow as tf\n",
110 | "\n",
111 | "sess=tf.Session() \n",
112 | "#First let's load meta graph and restore weights\n",
113 | "saver = tf.train.import_meta_graph('my_test_model-1000.meta')\n",
114 | "saver.restore(sess,tf.train.latest_checkpoint('./'))\n",
115 | "\n",
116 | "\n",
117 | "# Now, let's access and create placeholders variables and\n",
118 | "# create feed-dict to feed new data\n",
119 | "\n",
120 | "graph = tf.get_default_graph()\n",
121 | "w1 = graph.get_tensor_by_name(\"w1:0\")\n",
122 | "w2 = graph.get_tensor_by_name(\"w2:0\")\n",
123 | "feed_dict ={w1:13.0,w2:17.0}\n",
124 | "\n",
125 | "#Now, access the op that you want to run. \n",
126 | "op_to_restore = graph.get_tensor_by_name(\"op_to_restore:0\")\n",
127 | "\n",
128 | "print (sess.run(op_to_restore,feed_dict))\n",
129 | "#This will print 60 which is calculated \n",
130 | "#using new values of w1 and w2 and saved value of b1. "
131 | ]
132 | },
133 | {
134 | "cell_type": "markdown",
135 | "metadata": {},
136 | "source": [
137 | "만약 operation과 layer를 좀 더 추가하고 싶다면 아래와 같이 수행 한다."
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": 10,
143 | "metadata": {},
144 | "outputs": [
145 | {
146 | "name": "stdout",
147 | "output_type": "stream",
148 | "text": [
149 | "INFO:tensorflow:Restoring parameters from ./my_test_model-1000\n",
150 | "120.0\n"
151 | ]
152 | }
153 | ],
154 | "source": [
155 | "import tensorflow as tf\n",
156 | "\n",
157 | "sess=tf.Session() \n",
158 | "#First let's load meta graph and restore weights\n",
159 | "saver = tf.train.import_meta_graph('my_test_model-1000.meta')\n",
160 | "saver.restore(sess,tf.train.latest_checkpoint('./'))\n",
161 | "\n",
162 | "\n",
163 | "# Now, let's access and create placeholders variables and\n",
164 | "# create feed-dict to feed new data\n",
165 | "\n",
166 | "graph = tf.get_default_graph()\n",
167 | "w1 = graph.get_tensor_by_name(\"w1:0\")\n",
168 | "w2 = graph.get_tensor_by_name(\"w2:0\")\n",
169 | "feed_dict ={w1:13.0,w2:17.0}\n",
170 | "\n",
171 | "#Now, access the op that you want to run. \n",
172 | "op_to_restore = graph.get_tensor_by_name(\"op_to_restore:0\")\n",
173 | "\n",
174 | "#Add more to the current graph\n",
175 | "add_on_op = tf.multiply(op_to_restore,2)\n",
176 | "\n",
177 | "print (sess.run(add_on_op,feed_dict))\n",
178 | "#This will print 120."
179 | ]
180 | },
181 | {
182 | "cell_type": "markdown",
183 | "metadata": {},
184 | "source": [
185 | "만약 fine-tuning을 위해서 일부분만 저장하고 복구하고 싶다면? \n",
186 | "`graph.get_tensor_by_name()` method를 이용해서 적절한 operation만 접근할 수 있다."
187 | ]
188 | }
189 | ],
190 | "metadata": {
191 | "kernelspec": {
192 | "display_name": "Python 3",
193 | "language": "python",
194 | "name": "python3"
195 | },
196 | "language_info": {
197 | "codemirror_mode": {
198 | "name": "ipython",
199 | "version": 3
200 | },
201 | "file_extension": ".py",
202 | "mimetype": "text/x-python",
203 | "name": "python",
204 | "nbconvert_exporter": "python",
205 | "pygments_lexer": "ipython3",
206 | "version": "3.5.2"
207 | }
208 | },
209 | "nbformat": 4,
210 | "nbformat_minor": 2
211 | }
212 |
--------------------------------------------------------------------------------
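The sketch below illustrates the fine-tuning idea from the last cell on the same toy graph: restore the checkpoint, pull out only the tensors of interest with graph.get_tensor_by_name(), add a new trainable variable on top, and initialize just that variable so the restored values are kept. A minimal sketch, assuming the my_test_model-1000 checkpoint created above exists in the working directory:

import tensorflow as tf

sess = tf.Session()
saver = tf.train.import_meta_graph('my_test_model-1000.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))

graph = tf.get_default_graph()
w1 = graph.get_tensor_by_name("w1:0")
w2 = graph.get_tensor_by_name("w2:0")
op_to_restore = graph.get_tensor_by_name("op_to_restore:0")  # (w1 + w2) * b1

# Build a new op on top of the restored one; only this variable is newly created.
new_bias = tf.Variable(1.0, dtype=tf.float32, name="fine_tune_bias")
fine_tune_out = tf.add(op_to_restore, new_bias, name="fine_tune_out")

# Initialize only the new variable -- the restored b1 keeps its saved value (2.0).
sess.run(tf.variables_initializer([new_bias]))
print(sess.run(fine_tune_out, feed_dict={w1: 13.0, w2: 17.0}))  # (13 + 17) * 2.0 + 1.0 = 61.0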
/9.RNN/gp-for-sine-wave.py:
--------------------------------------------------------------------------------
1 |
2 | # coding: utf-8
3 |
4 | # ## Prediction sine wave function using Gaussian Process
5 | #
6 | # An example for Gaussian process algorithm to predict sine wave function.
7 | # This example is from ["Gaussian Processes regression: basic introductory example"](http://scikit-learn.org/stable/auto_examples/gaussian_process/plot_gp_regression.html).
8 |
9 | import numpy as np
10 | from sklearn.gaussian_process import GaussianProcess
11 | from matplotlib import pyplot as pl
12 | # %matplotlib inline  # notebook-only magic; not valid Python in a plain script
13 |
14 | np.random.seed(1)
15 |
16 |
17 | # The function to predict
18 | def f(x):
19 | return x*np.sin(x)
20 |
21 |
22 | # --------------------------
23 | # First the noiseless case
24 | # --------------------------
25 |
26 | # Observations
27 | X = np.atleast_2d([0., 1., 2., 3., 5., 6., 7., 8., 9.5]).T
28 | y = f(X).ravel()
29 |
30 | #X = np.atleast_2d(np.linspace(0, 100, 200)).T
31 |
32 | # Mesh the input space for evaluations of the real function, the prediction and its MSE
33 | x = np.atleast_2d(np.linspace(0, 10, 1000)).T
34 |
35 | # Instantiate a Gaussian Process model
36 | gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
37 | random_start=100)
38 |
39 | # Fit to data using Maximum Likelihood Estimation of the parameters
40 | gp.fit(X, y)
41 |
42 | # Make the prediction on the meshed x-axis (ask for MSE as well)
43 | y_pred, MSE = gp.predict(x, eval_MSE=True)
44 | sigma = np.sqrt(MSE)
45 |
46 |
47 | # Plot the function, the prediction and the 95% confidence interval based on the MSE
48 | fig = pl.figure()
49 | pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
50 | pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
51 | pl.plot(x, y_pred, 'b-', label=u'Prediction')
52 | pl.fill(np.concatenate([x, x[::-1]]),
53 | np.concatenate([y_pred - 1.9600 * sigma,
54 | (y_pred + 1.9600 * sigma)[::-1]]),
55 | alpha=.5, fc='b', ec='None', label='95% confidence interval')
56 | pl.xlabel('$x$')
57 | pl.ylabel('$f(x)$')
58 | pl.ylim(-10, 20)
59 | pl.legend(loc='upper left')
60 |
61 |
62 | # now the noisy case
63 | X = np.linspace(0.1, 9.9, 20)
64 | X = np.atleast_2d(X).T
65 |
66 | # Observations and noise
67 | y = f(X).ravel()
68 | dy = 0.5 + 1.0 * np.random.random(y.shape)
69 | noise = np.random.normal(0, dy)
70 | y += noise
71 |
72 | # Mesh the input space for evaluations of the real function, the prediction and
73 | # its MSE
74 | x = np.atleast_2d(np.linspace(0, 10, 1000)).T
75 |
76 | # Instantiate a Gaussian Process model
77 | gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
78 | thetaL=1e-3, thetaU=1,
79 | nugget=(dy / y) ** 2,
80 | random_start=100)
81 |
82 | # Fit to data using Maximum Likelihood Estimation of the parameters
83 | gp.fit(X, y)
84 |
85 | # Make the prediction on the meshed x-axis (ask for MSE as well)
86 | y_pred, MSE = gp.predict(x, eval_MSE=True)
87 | sigma = np.sqrt(MSE)
88 |
89 | # Plot the function, the prediction and the 95% confidence interval based on
90 | # the MSE
91 | fig = pl.figure()
92 | pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
93 | pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
94 | pl.plot(x, y_pred, 'b-', label=u'Prediction')
95 | pl.fill(np.concatenate([x, x[::-1]]),
96 | np.concatenate([y_pred - 1.9600 * sigma,
97 | (y_pred + 1.9600 * sigma)[::-1]]),
98 | alpha=.5, fc='b', ec='None', label='95% confidence interval')
99 | pl.xlabel('$x$')
100 | pl.ylabel('$f(x)$')
101 | pl.ylim(-10, 20)
102 | pl.legend(loc='upper left')
103 |
104 | pl.show()
105 |
106 |
--------------------------------------------------------------------------------
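The GaussianProcess class used above was removed from scikit-learn (around version 0.20). With a recent scikit-learn, the noiseless part of this script maps onto GaussianProcessRegressor roughly as follows; this is a minimal sketch, and the RBF kernel is an illustrative choice rather than a one-to-one port of corr='cubic':

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

def f(x):
    return x * np.sin(x)

X = np.atleast_2d([0., 1., 2., 3., 5., 6., 7., 8., 9.5]).T
y = f(X).ravel()
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

gp = GaussianProcessRegressor(kernel=C(1.0) * RBF(length_scale=1.0),
                              n_restarts_optimizer=10)
gp.fit(X, y)
y_pred, sigma = gp.predict(x, return_std=True)  # sigma plays the role of sqrt(MSE) above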
/9.RNN/lab-12-1-hello-rnn.py:
--------------------------------------------------------------------------------
1 | # Lab 12 RNN
2 | import tensorflow as tf
3 | import numpy as np
4 | tf.set_random_seed(777) # reproducibility
5 |
6 | idx2char = ['h', 'i', 'e', 'l', 'o']
7 | # Teach hello: hihell -> ihello
8 | x_data = [[0, 1, 0, 2, 3, 3]] # hihell
9 | x_one_hot = [[[1, 0, 0, 0, 0], # h 0
10 | [0, 1, 0, 0, 0], # i 1
11 | [1, 0, 0, 0, 0], # h 0
12 | [0, 0, 1, 0, 0], # e 2
13 | [0, 0, 0, 1, 0], # l 3
14 | [0, 0, 0, 1, 0]]] # l 3
15 |
16 | y_data = [[1, 0, 2, 3, 3, 4]] # ihello
17 |
18 | num_classes = 5
19 | input_dim = 5 # one-hot size
20 | hidden_size = 5 # output from the LSTM. 5 to directly predict one-hot
21 | batch_size = 1 # one sentence
22 | sequence_length = 6 # |ihello| == 6
23 |
24 | X = tf.placeholder(
25 |     tf.float32, [None, sequence_length, input_dim])  # X one-hot
26 | Y = tf.placeholder(tf.int32, [None, sequence_length]) # Y label
27 |
28 | cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
29 | initial_state = cell.zero_state(batch_size, tf.float32)
30 | outputs, _states = tf.nn.dynamic_rnn(
31 | cell, X, initial_state=initial_state, dtype=tf.float32)
32 |
33 | # FC layer
34 | X_for_fc = tf.reshape(outputs, [-1, hidden_size])
35 | # fc_w = tf.get_variable("fc_w", [hidden_size, num_classes])
36 | # fc_b = tf.get_variable("fc_b", [num_classes])
37 | # outputs = tf.matmul(X_for_fc, fc_w) + fc_b
38 | outputs = tf.contrib.layers.fully_connected(
39 | inputs=X_for_fc, num_outputs=num_classes, activation_fn=None)
40 |
41 | # reshape out for sequence_loss
42 | outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])
43 |
44 | weights = tf.ones([batch_size, sequence_length])
45 | sequence_loss = tf.contrib.seq2seq.sequence_loss(
46 | logits=outputs, targets=Y, weights=weights)
47 | loss = tf.reduce_mean(sequence_loss)
48 | train = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
49 |
50 | prediction = tf.argmax(outputs, axis=2)
51 |
52 | with tf.Session() as sess:
53 | sess.run(tf.global_variables_initializer())
54 | for i in range(50):
55 | l, _ = sess.run([loss, train], feed_dict={X: x_one_hot, Y: y_data})
56 | result = sess.run(prediction, feed_dict={X: x_one_hot})
57 | print(i, "loss:", l, "prediction: ", result, "true Y: ", y_data)
58 |
59 | # print char using dic
60 | result_str = [idx2char[c] for c in np.squeeze(result)]
61 | print("\tPrediction str: ", ''.join(result_str))
62 |
63 | '''
64 | 0 loss: 1.71584 prediction: [[2 2 2 3 3 2]] true Y: [[1, 0, 2, 3, 3, 4]]
65 | Prediction str: eeelle
66 | 1 loss: 1.56447 prediction: [[3 3 3 3 3 3]] true Y: [[1, 0, 2, 3, 3, 4]]
67 | Prediction str: llllll
68 | 2 loss: 1.46284 prediction: [[3 3 3 3 3 3]] true Y: [[1, 0, 2, 3, 3, 4]]
69 | Prediction str: llllll
70 | 3 loss: 1.38073 prediction: [[3 3 3 3 3 3]] true Y: [[1, 0, 2, 3, 3, 4]]
71 | Prediction str: llllll
72 | 4 loss: 1.30603 prediction: [[3 3 3 3 3 3]] true Y: [[1, 0, 2, 3, 3, 4]]
73 | Prediction str: llllll
74 | 5 loss: 1.21498 prediction: [[3 3 3 3 3 3]] true Y: [[1, 0, 2, 3, 3, 4]]
75 | Prediction str: llllll
76 | 6 loss: 1.1029 prediction: [[3 0 3 3 3 4]] true Y: [[1, 0, 2, 3, 3, 4]]
77 | Prediction str: lhlllo
78 | 7 loss: 0.982386 prediction: [[1 0 3 3 3 4]] true Y: [[1, 0, 2, 3, 3, 4]]
79 | Prediction str: ihlllo
80 | 8 loss: 0.871259 prediction: [[1 0 3 3 3 4]] true Y: [[1, 0, 2, 3, 3, 4]]
81 | Prediction str: ihlllo
82 | 9 loss: 0.774338 prediction: [[1 0 2 3 3 4]] true Y: [[1, 0, 2, 3, 3, 4]]
83 | Prediction str: ihello
84 | 10 loss: 0.676005 prediction: [[1 0 2 3 3 4]] true Y: [[1, 0, 2, 3, 3, 4]]
85 | Prediction str: ihello
86 |
87 | ...
88 |
89 | '''
90 |
--------------------------------------------------------------------------------
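The hand-written x_one_hot table in lab-12-1-hello-rnn.py can also be generated from x_data directly; a small sketch using NumPy indexing that yields the same [batch, sequence_length, input_dim] shape the placeholder expects:

import numpy as np

x_data = [[0, 1, 0, 2, 3, 3]]     # "hihell" as indices into idx2char
x_one_hot = np.eye(5)[x_data]     # shape (1, 6, 5)
print(x_one_hot.astype(int))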
/9.RNN/lab-12-2-char-seq-rnn.py:
--------------------------------------------------------------------------------
1 | # Lab 12 Character Sequence RNN
2 | import tensorflow as tf
3 | import numpy as np
4 | tf.set_random_seed(777) # reproducibility
5 |
6 | sample = " if you want you"
7 | idx2char = list(set(sample)) # index -> char
8 | char2idx = {c: i for i, c in enumerate(idx2char)}  # char -> index
9 |
10 | # hyper parameters
11 | dic_size = len(char2idx) # RNN input size (one hot size)
12 | hidden_size = len(char2idx) # RNN output size
13 | num_classes = len(char2idx) # final output size (RNN or softmax, etc.)
14 | batch_size = 1 # one sample data, one batch
15 | sequence_length = len(sample) - 1 # number of lstm rollings (unit #)
16 |
17 | sample_idx = [char2idx[c] for c in sample] # char to index
18 | x_data = [sample_idx[:-1]] # X data sample (0 ~ n-1) hello: hell
19 | y_data = [sample_idx[1:]] # Y label sample (1 ~ n) hello: ello
20 |
21 | X = tf.placeholder(tf.int32, [None, sequence_length]) # X data
22 | Y = tf.placeholder(tf.int32, [None, sequence_length]) # Y label
23 |
24 | x_one_hot = tf.one_hot(X, num_classes) # one hot: 1 -> 0 1 0 0 0 0 0 0 0 0
25 | cell = tf.contrib.rnn.BasicLSTMCell(
26 | num_units=hidden_size, state_is_tuple=True)
27 | initial_state = cell.zero_state(batch_size, tf.float32)
28 | outputs, _states = tf.nn.dynamic_rnn(
29 | cell, x_one_hot, initial_state=initial_state, dtype=tf.float32)
30 |
31 | # FC layer
32 | X_for_fc = tf.reshape(outputs, [-1, hidden_size])
33 | outputs = tf.contrib.layers.fully_connected(X_for_fc, num_classes, activation_fn=None)
34 |
35 | # reshape out for sequence_loss
36 | outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])
37 |
38 | weights = tf.ones([batch_size, sequence_length])
39 | sequence_loss = tf.contrib.seq2seq.sequence_loss(
40 | logits=outputs, targets=Y, weights=weights)
41 | loss = tf.reduce_mean(sequence_loss)
42 | train = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
43 |
44 | prediction = tf.argmax(outputs, axis=2)
45 |
46 | with tf.Session() as sess:
47 | sess.run(tf.global_variables_initializer())
48 | for i in range(50):
49 | l, _ = sess.run([loss, train], feed_dict={X: x_data, Y: y_data})
50 | result = sess.run(prediction, feed_dict={X: x_data})
51 |
52 | # print char using dic
53 | result_str = [idx2char[c] for c in np.squeeze(result)]
54 |
55 | print(i, "loss:", l, "Prediction:", ''.join(result_str))
56 |
57 |
58 | '''
59 | 0 loss: 2.35377 Prediction: uuuuuuuuuuuuuuu
60 | 1 loss: 2.21383 Prediction: yy you y you
61 | 2 loss: 2.04317 Prediction: yy yoo ou
62 | 3 loss: 1.85869 Prediction: yy ou uou
63 | 4 loss: 1.65096 Prediction: yy you a you
64 | 5 loss: 1.40243 Prediction: yy you yan you
65 | 6 loss: 1.12986 Prediction: yy you wann you
66 | 7 loss: 0.907699 Prediction: yy you want you
67 | 8 loss: 0.687401 Prediction: yf you want you
68 | 9 loss: 0.508868 Prediction: yf you want you
69 | 10 loss: 0.379423 Prediction: yf you want you
70 | 11 loss: 0.282956 Prediction: if you want you
71 | 12 loss: 0.208561 Prediction: if you want you
72 |
73 | ...
74 |
75 | '''
76 |
--------------------------------------------------------------------------------
/9.RNN/lab-12-3-char-seq-softmax-only.py:
--------------------------------------------------------------------------------
1 | # Lab 12 Character Sequence Softmax only
2 | import tensorflow as tf
3 | import numpy as np
4 | tf.set_random_seed(777) # reproducibility
5 |
6 | sample = " if you want you"
7 | idx2char = list(set(sample)) # index -> char
8 | char2idx = {c: i for i, c in enumerate(idx2char)}  # char -> index
9 |
10 | # hyper parameters
11 | dic_size = len(char2idx) # RNN input size (one hot size)
12 | rnn_hidden_size = len(char2idx) # RNN output size
13 | num_classes = len(char2idx) # final output size (RNN or softmax, etc.)
14 | batch_size = 1 # one sample data, one batch
15 | sequence_length = len(sample) - 1 # number of lstm rollings (unit #)
16 |
17 | sample_idx = [char2idx[c] for c in sample] # char to index
18 | x_data = [sample_idx[:-1]] # X data sample (0 ~ n-1) hello: hell
19 | y_data = [sample_idx[1:]] # Y label sample (1 ~ n) hello: ello
20 |
21 | X = tf.placeholder(tf.int32, [None, sequence_length]) # X data
22 | Y = tf.placeholder(tf.int32, [None, sequence_length]) # Y label
23 |
24 | # flatten the data (ignore batches for now). No effect if the batch size is 1
25 | X_one_hot = tf.one_hot(X, num_classes) # one hot: 1 -> 0 1 0 0 0 0 0 0 0 0
26 | X_for_softmax = tf.reshape(X_one_hot, [-1, rnn_hidden_size])
27 |
28 | # softmax layer (rnn_hidden_size -> num_classes)
29 | softmax_w = tf.get_variable("softmax_w", [rnn_hidden_size, num_classes])
30 | softmax_b = tf.get_variable("softmax_b", [num_classes])
31 | outputs = tf.matmul(X_for_softmax, softmax_w) + softmax_b
32 |
33 | # expand the data (revive the batches)
34 | outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])
35 | weights = tf.ones([batch_size, sequence_length])
36 |
37 | # Compute sequence cost/loss
38 | sequence_loss = tf.contrib.seq2seq.sequence_loss(
39 | logits=outputs, targets=Y, weights=weights)
40 | loss = tf.reduce_mean(sequence_loss) # mean all sequence loss
41 | train = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
42 |
43 | prediction = tf.argmax(outputs, axis=2)
44 |
45 | with tf.Session() as sess:
46 | sess.run(tf.global_variables_initializer())
47 | for i in range(3000):
48 | l, _ = sess.run([loss, train], feed_dict={X: x_data, Y: y_data})
49 | result = sess.run(prediction, feed_dict={X: x_data})
50 |
51 | # print char using dic
52 | result_str = [idx2char[c] for c in np.squeeze(result)]
53 | print(i, "loss:", l, "Prediction:", ''.join(result_str))
54 |
55 | '''
56 | 0 loss: 2.29513 Prediction: yu yny y y oyny
57 | 1 loss: 2.10156 Prediction: yu ynu y y oynu
58 | 2 loss: 1.92344 Prediction: yu you y u you
59 |
60 | ..
61 |
62 | 2997 loss: 0.277323 Prediction: yf you yant you
63 | 2998 loss: 0.277323 Prediction: yf you yant you
64 | 2999 loss: 0.277323 Prediction: yf you yant you
65 | '''
66 |
--------------------------------------------------------------------------------
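The flatten/expand reshapes in lab-12-3-char-seq-softmax-only.py are pure shape bookkeeping; for the 16-character sample (sequence_length = 15) they can be checked with plain NumPy. A standalone sketch, where dic_size stands in for however many distinct characters the sample has:

import numpy as np

sample = " if you want you"
sequence_length = len(sample) - 1     # 15
dic_size = len(set(sample))           # number of distinct characters

x_one_hot = np.zeros((1, sequence_length, dic_size))        # stands in for X_one_hot
x_for_softmax = x_one_hot.reshape(-1, dic_size)             # (15, dic_size): one row per time step
restored = x_for_softmax.reshape(1, sequence_length, dic_size)
print(x_for_softmax.shape, restored.shape)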
/9.RNN/lab-12-4-rnn_long_char.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import tensorflow as tf
4 | import numpy as np
5 | from tensorflow.contrib import rnn
6 | tf.set_random_seed(777) # reproducibility
7 |
8 | sentence = ("if you want to build a ship, don't drum up people together to "
9 | "collect wood and don't assign them tasks and work, but rather "
10 | "teach them to long for the endless immensity of the sea.")
11 |
12 | char_set = list(set(sentence))
13 | char_dic = {w: i for i, w in enumerate(char_set)}
14 |
15 | data_dim = len(char_set)
16 | hidden_size = len(char_set)
17 | num_classes = len(char_set)
18 | sequence_length = 10 # Any arbitrary number
19 |
20 | dataX = []
21 | dataY = []
22 | for i in range(0, len(sentence) - sequence_length):
23 | x_str = sentence[i:i + sequence_length]
24 | y_str = sentence[i + 1: i + sequence_length + 1]
25 | print(i, x_str, '->', y_str)
26 |
27 | x = [char_dic[c] for c in x_str] # x str to index
28 | y = [char_dic[c] for c in y_str] # y str to index
29 |
30 | dataX.append(x)
31 | dataY.append(y)
32 |
33 | batch_size = len(dataX)
34 |
35 | X = tf.placeholder(tf.int32, [None, sequence_length])
36 | Y = tf.placeholder(tf.int32, [None, sequence_length])
37 |
38 | # One-hot encoding
39 | X_one_hot = tf.one_hot(X, num_classes)
40 | print(X_one_hot) # check out the shape
41 |
42 | # Make a lstm cell with hidden_size (each unit output vector size)
43 | cell = rnn.BasicLSTMCell(hidden_size, state_is_tuple=True)
44 | cell = rnn.MultiRNNCell([cell] * 2, state_is_tuple=True)
45 |
46 | # outputs: unfolding size x hidden size, state = hidden size
47 | outputs, _states = tf.nn.dynamic_rnn(cell, X_one_hot, dtype=tf.float32)
48 |
49 | # FC layer
50 | X_for_fc = tf.reshape(outputs, [-1, hidden_size])
51 | outputs = tf.contrib.layers.fully_connected(X_for_fc, num_classes, activation_fn=None)
52 |
53 | # reshape out for sequence_loss
54 | outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])
55 |
56 | # All weights are 1 (equal weights)
57 | weights = tf.ones([batch_size, sequence_length])
58 |
59 | sequence_loss = tf.contrib.seq2seq.sequence_loss(
60 | logits=outputs, targets=Y, weights=weights)
61 | mean_loss = tf.reduce_mean(sequence_loss)
62 | train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(mean_loss)
63 |
64 | sess = tf.Session()
65 | sess.run(tf.global_variables_initializer())
66 |
67 | for i in range(500):
68 | _, l, results = sess.run(
69 | [train_op, mean_loss, outputs], feed_dict={X: dataX, Y: dataY})
70 | for j, result in enumerate(results):
71 | index = np.argmax(result, axis=1)
72 | print(i, j, ''.join([char_set[t] for t in index]), l)
73 |
74 | # Let's print the last char of each result to check it works
75 | results = sess.run(outputs, feed_dict={X: dataX})
76 | for j, result in enumerate(results):
77 | index = np.argmax(result, axis=1)
78 | if j == 0: # print all for the first result to make a sentence
79 | print(''.join([char_set[t] for t in index]), end='')
80 | else:
81 | print(char_set[index[-1]], end='')
82 |
83 | '''
84 | 0 167 tttttttttt 3.23111
85 | 0 168 tttttttttt 3.23111
86 | 0 169 tttttttttt 3.23111
87 | …
88 | 499 167 of the se 0.229616
89 | 499 168 tf the sea 0.229616
90 | 499 169 the sea. 0.229616
91 |
92 | g you want to build a ship, don't drum up people together to collect wood and don't assign them tasks and work, but rather teach them to long for the endless immensity of the sea.
93 |
94 | '''
95 |
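96 | # A minimal sketch of the sliding-window split performed at the top of this file,
97 | # on a hypothetical input. With sentence = "if you" and sequence_length = 3:
98 | #
99 | #   i = 0: x_str = "if "  ->  y_str = "f y"
100 | #   i = 1: x_str = "f y"  ->  y_str = " yo"
101 | #   i = 2: x_str = " yo"  ->  y_str = "you"
102 | #
103 | # The target window is the input window shifted one character into the future,
104 | # which is exactly the next-character setup that sequence_loss expects.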
--------------------------------------------------------------------------------
/9.RNN/lab-12-5-rnn_stock_prediction.py:
--------------------------------------------------------------------------------
1 | '''
2 | This script shows how to predict stock prices using a basic RNN
3 | '''
4 | import tensorflow as tf
5 | import numpy as np
6 | import matplotlib
7 | import os
8 |
9 | tf.set_random_seed(777) # reproducibility
10 |
11 | if "DISPLAY" not in os.environ:
12 | # remove Travis CI Error
13 | matplotlib.use('Agg')
14 |
15 | import matplotlib.pyplot as plt
16 |
17 |
18 | def MinMaxScaler(data):
19 | ''' Min Max Normalization
20 |
21 | Parameters
22 | ----------
23 | data : numpy.ndarray
24 | input data to be normalized
25 | shape: [Batch size, dimension]
26 |
27 | Returns
28 | ----------
29 | data : numpy.ndarray
30 | normalized data
31 | shape: [Batch size, dimension]
32 |
33 | References
34 | ----------
35 | .. [1] http://sebastianraschka.com/Articles/2014_about_feature_scaling.html
36 |
37 | '''
38 | numerator = data - np.min(data, 0)
39 | denominator = np.max(data, 0) - np.min(data, 0)
40 | # noise term prevents the zero division
41 | return numerator / (denominator + 1e-7)
42 |
43 |
44 | # train Parameters
45 | timesteps = seq_length = 7
46 | data_dim = 5
47 | hidden_dim = 10
48 | output_dim = 1
49 | learning_rate = 0.01
50 | iterations = 500
51 |
52 | # Open, High, Low, Volume, Close
53 | xy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')
54 | xy = xy[::-1] # reverse to chronological order
55 | xy = MinMaxScaler(xy)
56 | x = xy
57 | y = xy[:, [-1]] # Close as label
58 |
59 | # build a dataset
60 | dataX = []
61 | dataY = []
62 | for i in range(0, len(y) - seq_length):
63 | _x = x[i:i + seq_length]
64 | _y = y[i + seq_length] # Next close price
65 | print(_x, "->", _y)
66 | dataX.append(_x)
67 | dataY.append(_y)
68 |
69 | # train/test split
70 | train_size = int(len(dataY) * 0.7)
71 | test_size = len(dataY) - train_size
72 | trainX, testX = np.array(dataX[0:train_size]), np.array(
73 | dataX[train_size:len(dataX)])
74 | trainY, testY = np.array(dataY[0:train_size]), np.array(
75 | dataY[train_size:len(dataY)])
76 |
77 | # input place holders
78 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
79 | Y = tf.placeholder(tf.float32, [None, 1])
80 |
81 | # build a LSTM network
82 | cell = tf.contrib.rnn.BasicLSTMCell(
83 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
84 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
85 | Y_pred = tf.contrib.layers.fully_connected(
86 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
87 |
88 | # cost/loss
89 | loss = tf.reduce_sum(tf.square(Y_pred - Y)) # sum of the squares
90 | # optimizer
91 | optimizer = tf.train.AdamOptimizer(learning_rate)
92 | train = optimizer.minimize(loss)
93 |
94 | # RMSE
95 | targets = tf.placeholder(tf.float32, [None, 1])
96 | predictions = tf.placeholder(tf.float32, [None, 1])
97 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)))
98 |
99 | with tf.Session() as sess:
100 | init = tf.global_variables_initializer()
101 | sess.run(init)
102 |
103 | # Training step
104 | for i in range(iterations):
105 | _, step_loss = sess.run([train, loss], feed_dict={
106 | X: trainX, Y: trainY})
107 | print("[step: {}] loss: {}".format(i, step_loss))
108 |
109 | # Test step
110 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
111 | rmse = sess.run(rmse, feed_dict={
112 | targets: testY, predictions: test_predict})
113 | print("RMSE: {}".format(rmse))
114 |
115 | # Plot predictions
116 | plt.plot(testY)
117 | plt.plot(test_predict)
118 | plt.xlabel("Time Period")
119 | plt.ylabel("Stock Price")
120 | plt.show()
121 |
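122 | # A minimal sketch of what MinMaxScaler above does, on a hypothetical 3x2 array.
123 | # Each column is rescaled independently to [0, 1]:
124 | #
125 | #   data = np.array([[1., 10.], [2., 20.], [3., 30.]])
126 | #   numerator   = data - np.min(data, 0)             # subtract the column minimum
127 | #   denominator = np.max(data, 0) - np.min(data, 0)  # column range: [2., 20.]
128 | #   MinMaxScaler(data) ~= [[0. , 0. ],
129 | #                          [0.5, 0.5],
130 | #                          [1. , 1. ]]                # 1e-7 only guards against 0/0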
--------------------------------------------------------------------------------
/9.RNN/lstm-for-epf.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from matplotlib import pyplot as plt
4 |
5 | from tensorflow.contrib import learn
6 | from sklearn.metrics import mean_squared_error, mean_absolute_error
7 | from lstm_predictor import generate_data, load_csvdata, lstm_model
8 |
9 |
10 | LOG_DIR = './ops_logs'
11 | TIMESTEPS = 10
12 | RNN_LAYERS = [{'steps': TIMESTEPS}]
13 | DENSE_LAYERS = [10, 10]
14 | TRAINING_STEPS = 100000
15 | BATCH_SIZE = 100
16 | PRINT_STEPS = TRAINING_STEPS / 100
17 |
18 | dateparse = lambda dates: pd.datetime.strptime(dates, '%d/%m/%Y %H:%M')
19 | rawdata = pd.read_csv("./RealMarketPriceDataPT.csv",
20 | parse_dates={'timeline': ['date', '(UTC)']},
21 | index_col='timeline', date_parser=dateparse)
22 |
23 |
24 | X, y = load_csvdata(rawdata, TIMESTEPS, seperate=False)
25 |
26 |
27 | regressor = learn.TensorFlowEstimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS),
28 | n_classes=0,
29 | verbose=1,
30 | steps=TRAINING_STEPS,
31 | optimizer='Adagrad',
32 | learning_rate=0.03,
33 | batch_size=BATCH_SIZE)
34 |
35 |
36 |
37 |
38 | validation_monitor = learn.monitors.ValidationMonitor(X['val'], y['val'],
39 | every_n_steps=PRINT_STEPS,
40 | early_stopping_rounds=1000)
41 |
42 | regressor.fit(X['train'], y['train'], monitors=[validation_monitor], logdir=LOG_DIR)
43 |
44 |
45 | predicted = regressor.predict(X['test'])
46 | mae = mean_absolute_error(y['test'], predicted)
47 | print("MAE: %f" % mae)
48 |
49 | plot_predicted, = plt.plot(predicted, label='predicted')
50 | plot_test, = plt.plot(y['test'], label='test')
51 | plt.legend(handles=[plot_predicted, plot_test])
52 |
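53 | # A minimal sketch of the date parsing above, on a hypothetical CSV value. The
54 | # 'date' and '(UTC)' columns are merged into a single 'timeline' string, which
55 | # dateparse converts with the '%d/%m/%Y %H:%M' format:
56 | #
57 | #   pd.datetime.strptime('01/01/2015 13:00', '%d/%m/%Y %H:%M')
58 | #   # -> datetime.datetime(2015, 1, 1, 13, 0)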
--------------------------------------------------------------------------------
/9.RNN/lstm-for-sine-wave.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Libraries\n",
8 | "\n",
9 | "- numpy: package for scientific computing \n",
10 | "- matplotlib: 2D plotting library\n",
11 | "- tensorflow: open source software library for machine intelligence\n",
12 | "- **learn**: Simplified interface for TensorFlow (mimicking Scikit Learn) for Deep Learning\n",
13 | "- mse: \"mean squared error\" as evaluation metric\n",
14 | "- **lstm_predictor**: our lstm class "
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 1,
20 | "metadata": {
21 | "collapsed": true
22 | },
23 | "outputs": [],
24 | "source": [
25 | "%matplotlib inline\n",
26 | "import numpy as np\n",
27 | "from matplotlib import pyplot as plt\n",
28 | "\n",
29 | "from tensorflow.contrib import learn\n",
30 | "from sklearn.metrics import mean_squared_error, mean_absolute_error\n",
31 | "from lstm_predictor import generate_data, lstm_model"
32 | ]
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "metadata": {},
37 | "source": [
38 | "## Parameter definitions\n",
39 | "\n",
40 | "- LOG_DIR: log file\n",
41 | "- TIMESTEPS: RNN time steps\n",
42 | "- RNN_LAYERS: RNN layer information\n",
43 | "- DENSE_LAYERS: Size of DNN, [10, 10]: Two dense layer with 10 hidden units\n",
44 | "- TRAINING_STEPS\n",
45 | "- BATCH_SIZE\n",
46 | "- PRINT_STEPS"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 2,
52 | "metadata": {
53 | "collapsed": true
54 | },
55 | "outputs": [],
56 | "source": [
57 | "LOG_DIR = './ops_logs'\n",
58 | "TIMESTEPS = 5\n",
59 | "RNN_LAYERS = [{'steps': TIMESTEPS}]\n",
60 | "DENSE_LAYERS = [10, 10]\n",
61 | "TRAINING_STEPS = 100000\n",
62 | "BATCH_SIZE = 100\n",
63 | "PRINT_STEPS = TRAINING_STEPS / 100"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "## Generate waveform\n",
71 | " - fct: function\n",
72 | " - x: observation\n",
73 | " - time_steps\n",
74 | " - seperate: check multimodal"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": 3,
80 | "metadata": {
81 | "collapsed": true
82 | },
83 | "outputs": [],
84 | "source": [
85 | "X, y = generate_data(np.sin, np.linspace(0, 100, 10000), TIMESTEPS, seperate=False)"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {},
91 | "source": [
92 | "## Create a regressor with TF Learn\n",
93 | "**Parameters**: \n",
94 | "- model_fn: regression model\n",
95 | "- n_classes: 0 for regression\n",
96 | "- verbose\n",
97 | "- steps: training steps\n",
98 | "- optimizer: (\"SGD\", \"Adam\", \"Adagrad\")\n",
99 | "- learning_rate\n",
100 | "- batch_size"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 4,
106 | "metadata": {
107 | "collapsed": true
108 | },
109 | "outputs": [],
110 | "source": [
111 | "regressor = learn.TensorFlowEstimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS), \n",
112 | " n_classes=0,\n",
113 | " verbose=1, \n",
114 | " steps=TRAINING_STEPS, \n",
115 | " optimizer='Adagrad',\n",
116 | " learning_rate=0.03, \n",
117 | " batch_size=BATCH_SIZE)"
118 | ]
119 | },
120 | {
121 | "cell_type": "markdown",
122 | "metadata": {},
123 | "source": [
124 | "## ValidationMonitor\n",
125 | " - x\n",
126 | " - y\n",
127 | " - every_n_steps\n",
128 | " - early_stopping_rounds"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": 5,
134 | "metadata": {
135 | "collapsed": true
136 | },
137 | "outputs": [],
138 | "source": [
139 | "validation_monitor = learn.monitors.ValidationMonitor(X['val'], y['val'],\n",
140 | " every_n_steps=PRINT_STEPS,\n",
141 | " early_stopping_rounds=1000)"
142 | ]
143 | },
144 | {
145 | "cell_type": "markdown",
146 | "metadata": {},
147 | "source": [
148 | "## Train and validation\n",
149 | "\n",
150 | "- fit: fitting using training data"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": 6,
156 | "metadata": {
157 | "collapsed": false,
158 | "scrolled": true
159 | },
160 | "outputs": [
161 | {
162 | "name": "stderr",
163 | "output_type": "stream",
164 | "text": [
165 | "WARNING:tensorflow:Input iterator is exhausted: .\n",
166 | "WARNING:tensorflow:Input iterator is exhausted: .\n"
167 | ]
168 | },
169 | {
170 | "data": {
171 | "text/plain": [
172 | "TensorFlowEstimator(steps=100000, optimizer=Adagrad, learning_rate=0.03, batch_size=100, n_classes=0, verbose=1, class_weight=None, continue_training=False, clip_gradients=5.0, params=None)"
173 | ]
174 | },
175 | "execution_count": 6,
176 | "metadata": {},
177 | "output_type": "execute_result"
178 | }
179 | ],
180 | "source": [
181 | "regressor.fit(X['train'], y['train'], monitors=[validation_monitor], logdir=LOG_DIR)"
182 | ]
183 | },
184 | {
185 | "cell_type": "markdown",
186 | "metadata": {},
187 | "source": [
188 | "## Evaluate using test set\n",
189 | "\n",
190 | "Evaluate our hypothesis using test set. The mean squared error (MSE) is used for the evaluation metric.\n"
191 | ]
192 | },
193 | {
194 | "cell_type": "code",
195 | "execution_count": 7,
196 | "metadata": {
197 | "collapsed": false
198 | },
199 | "outputs": [
200 | {
201 | "name": "stdout",
202 | "output_type": "stream",
203 | "text": [
204 | "Error: 0.000294\n"
205 | ]
206 | }
207 | ],
208 | "source": [
209 | "predicted = regressor.predict(X['test'])\n",
210 | "mse = mean_squared_error(y['test'], predicted)\n",
211 | "print (\"Error: %f\" % mse)"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "metadata": {},
217 | "source": [
218 | "## Plotting\n",
219 | "\n",
220 | "Then, plot both predicted values and original values from test set."
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": 8,
226 | "metadata": {
227 | "collapsed": false
228 | },
229 | "outputs": [
230 | {
231 | "data": {
232 | "text/plain": [
233 | ""
234 | ]
235 | },
236 | "execution_count": 8,
237 | "metadata": {},
238 | "output_type": "execute_result"
239 | },
240 | {
241 | "data": {
242 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYMAAAEACAYAAABRQBpkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XV4lFf6//H3iZAAIRAsSAhOcHcp7mSCOy1SSm13f9tu\n9VuhsrRQ3ZaWtkCxQoEQIAluDYtT3K0UCRLcQjzn90dSStlgycyckft1XXMxM3ky5zMPz+SeR845\nSmuNEEII9+ZhOoAQQgjzpBgIIYSQYiCEEEKKgRBCCKQYCCGEQIqBEEIIrFAMlFI/KqXilFJ7H7DM\nV0qpo0qp3UqpOjltUwghhHVZY89gCtDpfj9USnUBKmitKwLPABOs0KYQQggrynEx0FqvA64+YBEL\nMC1z2S1AAaVUYE7bFUIIYT32OGdQEjh91+NYIMgO7QohhHhE9jqBrO55LGNgCCGEA/GyQxtngFJ3\nPQ7KfO4vlFJSIIQQIhu01vd+4X5s9tgziAKeBFBKNQauaa3jslpQay03rXn33XeNZ3CUm6usi6TU\nJP61/BX8PyhM4dDPqdfkFhMnamJjs1g2SbN8dQLNX/oa9UogNV79G6fO3XaZdSHbhXVv1pLjPQOl\n1M9AS6CwUuo08C7gDaC1/l5rvUQp1UUpdQyIB4bltE0hnMmp66cIm9mbc0eKU3L9Ab4cU4QOHe6/\nfK5c0KGNLx3avMjR04Po+t2zlBvTkLanWtsvtHA7OS4GWusBj7DMizltRwhntDduL+2mdCFh7d/5\nW71/8e5GRa5cj/77FUsFcPjD2bweMYFPvnyV3v8YzKxPGj7WawjxKOxxzkA8platWpmO4DCceV3s\nOr+LVpM6kr70S+a8NYDOnbP3OkopxvZ+Hp+r1xh7shvN+0exZnpj/Pysm9eZOPN24aiUNY855YRS\nSjtKFiFy6ujlozSc0BK17CvWjO9N7drWed3ow0voO3MYFTcvZ/282vj7W+d1hfNSSqGtcAJZioEQ\nVnYx/iI1vmpEwso32Pj1SKpVs+7rz90fzog5L1Nz6xbWRBbHx8e6r38vpXL8d0ZYSVZ/I6UYCOGA\nUtNTafxNRw6sbMj60R9Rt65t2hn9y/t8vWwJLX+PYd5sXzxseF1g5h8b2zUgHsn9/h+sVQxk1FIh\nrOjFhW+wb48nc0Z9aLNCAPBuq7dpXTeYTQX+zvvv264d4T6kGAhhJVEHljJly1xeLf8zoV09bdqW\nUoofu0/Cp8pqvl41n6VLbdqccANymEgIK7h8+zJlxtak5m8/sX5Ga+x1mH1L7BY6T7fgMWk7O2KC\nCA62fhtymMgxyGEiIZxAzynPoff1I/JL+xUCgEZBjfhnsxcpNPQZhg3XpKfbr21XVqZMGdasWQPA\nmDFjGDlypM3bjImJoVSpUg9f0EakGAiRQz/viGLjsd3MHjmGwoXt3/7rzV8nV5FYTvnP5ttv7d++\nK7r7Cqo333yTiRMnPvR3hg4dyttvv23LWDblUMVA9kSFs4lPjmfUwr/TMfVbunXyNZLB29ObSaET\nud7oJd75+DLHjxuJ4bBSU1NNR3AKDlUM5swxnUCIx/P87H+TcrwpM95vazRHo6BG9K/Zh4qj3uQf\n/zAaxW7KlCnDxx9/TLVq1ShYsCDDhw8nKSmJmJgYgoKCGDduHMWLF2fEiBForfn444+pUKEChQsX\npl+/fly9+uecXDNmzKB06dIULlyYMWPG/KWd0aNHM2TIkDuP169fT9OmTQkICCA4OJhp06YxceJE\nZs2axbhx48iXLx9hYWEAnD17ll69elG0aFHKlSvH119/fed1EhISGDp0KAULFqRatWr8+uuvNl5j\nD2F6xL27Rt7TxUqk6qtXtRBOYX/cQe31RmH9zfSzpqNorbW+cvuKLjquqC7dYI+OirLe62b8mXA8\npUuX1jVq1NCxsbH6ypUrulmzZvqtt97SMTEx2svLS7/++us6OTlZJyQk6C+//FI3adJEnzlzRicn\nJ+tRo0bpAQMGaK213r9/v/bz89Pr1q3TSUlJ+qWXXtJeXl569erVWmutR48erQcPHqy11vrEiRM6\nX758evbs2To1NVVfvnxZ79q1S2ut9dChQ/Xbb799J19aWpquW7eu/uCDD3RKSoo+fvy4LleunF6+\nfLnWWuvXXntNP/HEE/rq1av69OnTulq1arpUqVL3fb/3+3/IfD7nf4Ot8SJWCQK62d8m6tdfv++6\nEMKh1BkXqkv1+1Snp5tO8qfxW8br2l+01eXKp+vkZOu85sOKQcYB3pzfHleZMmX0999/f+fxkiVL\ndPny5XVMTIzOlSuXTkpKuvOzKlWq3PnjrrXWZ8+e1d7e3jo1NVW/9957dwqD1lrHx8frXLly3Vn+\n3XffvVMMxowZo3v27JllnqFDh+q33nrrzuPNmzfr4ODgvywzZswYPWzYMK21/kth0FrrH374QQcF\nBd33/dq6GDjUYaKjQe/w/ZSbnD1rOokQD7b62Dr2xu1l8qgX7Xr10MOMqj+KJO+z+NdbxOTJ9mnT\nWuUgO+6++iY4OJizmX88ihQpQq67hnY9ceIEPXr0ICAggICAAKpWrYqXlxdxcXGcO3eOoKA/Z+LN\nkycPhQoVyrK906dPU65cuUfKdvLkSc6ePXunzYCAAD766CMuXLgAZBxCuje/SQ5VDDpVak/IiLHS\no1I4NK01I2a/QvULH9K+tY0HBnpMXh5efNbhM642eIX3P0zl9m3TiWzr1KlTf7lfokQJ4H/HUwoO\nDmbZsmVcvXr1zu327duUKFGC4sWLc/r0n9O03759m8uXL2fZXnBwML/99luWP8uqzbJly/6lzRs3\nbrBo0SIAihcv/j/5TXKoYvB+q/c57D+B8MUXOXLEdBohsvbTjgjOnE9m2isPncrDiE4VOlG6cCAl\nO81k/HjTaWxHa823337LmTNnuHLlCv/+97/p379/lss+++yzvPnmm3f+4F68eJGoqCgAevfuzaJF\ni9iwYQPJycm88847pN+nw8bAgQNZtWoV4eHhpKamcvnyZXbv3g1AYGAgx++6lKthw4bky5ePcePG\nkZCQQFpaGvv27WPbtm0A9O3bl48++ohr164RGxv7l5PLJjhUMShdoDT9q/ej6jPjGD3adBoh/lda\nehovL36b5okfU7OGQ3187lBK8UHrD4ir/D6ffJ7CtWumE9mGUoqBAwfSoUMHypcvT8WKFXnrrbfQ\nWv/Pt/R//OMfWCwWOnTogL+/P02aNGHr1q0AVK1alW+++YaBAwdSokQJChYs+JfDN0qpO68XHBzM\nkiVL+OyzzyhUqBB16tRhz549AIwYMYIDBw4QEBBAz5498fDwYNGiRezatYty5cpRpEgRnnnmGW7c\nuAHAu+++S+nSpSlbtiydOnXiySefNDpCrMMNR3Hmxhmqf1sD9e0Btq4pRoUKppMJ8afpO+bw9MT/\nsP35DdSo4UAnC7LQfkZ7bm3uS
2jJkbz5ZvZfx1GHoyhbtiyTJ0+mTZs2pqPYhdsNR1HSvyRP1X6S\nisM+ZuxY02mE+FO6Tuf1xR/SIOFthy8EAB+0/oCTZT7kP+OTXP7cgcg5hysGkNG9/nDuaYQvjeOu\n8zpCGDV3z0Iuxfny1d86mY7ySBoHNaZm8SqU6DyDH380nUY4Ooc7TPSH5xY9x86NhWgU/yH/+Y/B\nYEKQcbKy9Jh6FNk/mu2zLKbjPLKYEzE8FT4K/c0Bfjvqibf347+Gox4mcjdud5joD/9q+i+O+H/H\ntJ9vcuWK6TTC3S0+soQLF9P55OlQ01EeS8vSLSkWUID8DSOZNct0GuHIHLYYlC9YnvYV2lKx/0Qe\nYcBAIWzq/5Z8StGjr9G6teOfK7ibUorXmr1GSsOxfPGlznbnLuH6HLYYALza9FVOl/yC8ROSSUkx\nnUa4qx3ndnDk4jHe7tXboXobP6qwkDC0zzUu+61l/XrTaYSjcuhiUK9EPaoXDyFPw59ZsMB0GuGu\nRi//As/tf2fIwGwccHcAnh6evNLsFfJ1Hovhfk3CgTl0MYCMcwfJ9b7gy//I/q2wv9gbsaz4fTEv\nNB6Jr5npCqxiSM0hXPLewfJth+UKPZElhy8GHcp3wDvPbY6nbCCzF7cQdvPpf8ejdw/hn88VMB0l\nR3y8fHim3kjK9BvPhAmm0whH5PDFwEN58GLDFyka+jXff286jXAn8cnxTNoxifb+f6dYMdNpcu7Z\n+s9y0n8mP0y7QUKC6TTWcfdcxdk1depUWrRoYaVEzsvhiwHA0NpDOe29krlLz5A5rIcQNjdr7894\nnGnKy8PKm45iFUH+QXSo0I4i7acxb57pNNYhfSCsxymKgb+PP4NqDqRE2HdyrbSwm8//+x1+h56j\nVSvTSaznbw3/xo2Q8UyclPWonM5kyJAhnDp1itDQUPLly8enn37K5s2b70xJWbt2bdauXXtn+alT\np1K+fHn8/f0pV64cs2bN4tChQzz77LNs2rSJfPnyUbBgQYPvyDBrzJBjjRsPmero4MWDOuDfgbpm\n3USHmllKuKatsVu13/+V0R+PTTUdxarS09N1zW9r6QL1l+nDhx/tdx722TSpTJkyd2Yki42N1YUK\nFdJLly7VWmu9cuVKXahQIX3p0iV969Yt7e/vr48cOaK11vr8+fN6//79Wmutp06dqps3b27mDTyG\n+/0/YKWZzryMVqLHULlwZeoG1WB3/gi2bRtIgwamEwlX9tXG70jZMophMz1NR7EqpRR/a/QiY89O\nYNKkjowbZ4XXfM86nS/0uzk73PPTTz/RpUsXOnXKGDuqXbt21K9fn8WLF9O7d288PDzYu3cvQUFB\nBAYGEhgYmNGuHGYCcJ5iADCq3jO8duJbfvhBioGwnasJV4k4OJ+ORQ5TtKjpNNbXv3p/Xl72Cj/+\ndI4PPyzOXbNDZktO/4hby8mTJwkPDyc6OvrOc6mpqbRp04Y8efIwZ84cPv30U0aMGEGzZs347LPP\nCAkJMZjYsTjFOYM/WEIs3PDZz+yVR7l1y3Qa4apm7JmB7+lOvDjMBSsB4JfLj77Ve5OvxRQyZ2B0\nWndPBhMcHMyQIUP+Ms3kzZs3efXVVwHo0KEDK1as4Pz581SuXJmRI0f+z2u4M6cqBj5ePgyt8yRF\nO06WHsnCJrTWfLXxO9T253DlOVNG1htJfMgkfpjo3CeSAwMD78xJPGjQIKKjo1mxYgVpaWkkJiYS\nExPDmTNnuHDhApGRkcTHx+Pt7U3evHnx9PS88xqxsbGkuPmYN05VDABG1BnBlVLTmDrDvf/jhG1s\nPL2R6zc0g1q0wNO1Thf8RYMSDSgWkI8NZ9dw7pzpNNn3xhtv8OGHHxIQEEB4eDiRkZGMGTOGokWL\nEhwczGeffYbWmvT0dL744gtKlixJoUKFWLduHRMye9+1bduWatWqUaxYMYq64nHBR+Sw8xk8SLPJ\nLdj93UscWtCDoCAbBxNuZUTU00RNqUT0a6/SuLHpNLY1fut4Ppu3jhcD5/Dyy/dfTq7ldwxuO5/B\ng4yqP5KC7SYyc6bpJMKVxCfHE74vAt/DQ2jUyHQa2xtUYxAX/Zczde4l01GEA3DKYtC7am+u+W1m\n8rxTMj67sJqIgxEUSWzK4LDiTjlU9eMKyB1Aj6oWTheczr59ptMI05yyGOTxzsPAWv24VOInduww\nnUa4iqm7pnJj7TAGDDCdxH6G1xlGrobTmDHDdBJhmlMWA4Anaw3Bo850pk2XXQORc79f/Z2dZ/ZS\n+EooNWqYTmM/Lcu0xDPvVaYu3U26c19YJHLIaYtBk6Am5PVPZcbqbaSmmk4jnN203dModaM/g/r5\nuMUhoj94KA+G1RuMrjmDmBjTaYRJTlsMlFIMrzuEXA2mk8MRbIWbS9fpTNs1jdjoYfTvbzqN/Q2p\nOYTESjOZNkO+Vbkzpy0GAENqDSG+7Gxmzk42HUU4sbUn1uKR4k/5vHWoUMF0GvurUqQK5QuXImLn\nahITs15GKSU3wzdbc+piUC6gHFWLhjB/zzKSpR6IbPppz08Uin2SAf3d6PjQPYbXG0LeJtNZvvx/\nf2aNETHlZrXRnW3GqYsBwNMNnsSn4XRWrDCdRDijxNREFhxawJH5/enXz3Qac/pX78+N4ouYOU9m\nj3JXTl8M+lTtQ3yxlUwPv2o6inBCS48upaRnLWqXL0nJkqbTmFMkbxFalm7Fot8iXGZKTPF4clwM\nlFKdlFKHlFJHlVKvZfHzVkqp60qpnZm3t3La5t0CcgfQrmwHoo/PlY1YPLZZ+2bhc3SgW/UtuJ+R\nDZ7Ep9F0li0znUSYkKNioJTyBMYDnYCqwAClVJUsFl2rta6TefswJ21m5ekGg/GpP5OlS639ysKV\n3Ui6wYpjKziysBe9e5tOY17XSl1JDtjFtPlnTUcRBuR0z6AhcExrfUJrnQLMBsKyWM6mZ+Y6VehE\nSsA+pkTE2rIZ4WIWHFxABe9WtKhfkEKFTKcxz9fLl9BKYSyPDef2bdNphL3ltBiUBE7f9Tg287m7\naaCpUmq3UmqJUqpqDtv8Hz5ePlhCwlh5NlwmvRGP7Od9P8OegW7Zt+B+htbvj0/d2SxZYjqJsLec\nTnv5KNc67QBKaa1vK6U6AwuBSlktOHr06Dv3W7VqRatWrR45yNB6/Vla710WLfqnfLjFQ8XdimPT\n6c3oxfPp/o3pNI6jbdm2pBUYzJQFv9O7d1nTcUQWYmJiiLFBd/EczWeglGoMjNZad8p8/AaQrrUe\n+4Df+R2op7W+cs/zjzyfQVZS0lIoOKYETfdvZfls2YjFg43fOp6fYrZQ6tcZhIebTuNYnpo3ijnf\nleNy5GvkzWs6jXgYR5nPYBtQUSlVRimVC+gHRN29gFIqUGV2n1NKNSSjAF3535fKGW9Pb3pX7cXa\nS3O5IZdKi4eYtXcWCVvlKqKsDK3Xn1x15jj9/Mji8eSoGGitU4EXgeXAAWCO1vqgUmqUUm
pU5mK9\ngb1KqV3Al4DNDuI8VS/jeGd0tK1aEK7gxLUTHL50lJO/tKNLF9NpHM8TpZ/Aw/8ckyMPm44i7CjH\n/Qy01ku11iFa6wpa648yn/tea/195v1vtNbVtda1tdZNtdabc9rm/bQIboGHfxyTF8pGLO5v3oF5\nhKT3oHuoN76+ptM4Hk8PT/rV6Mt/L8/h5k3TaYS9OH0P5LtlbMR92HB9DteumU4jHNW8A/O4ur6P\nXGjwAE/V64d3ndlERsp8Ie7CpYoBwFN1+5Or7mwWLpSNWPyvU9dPceTSMS5tb0XbtqbTOK7GQY3x\nyRfPpCiZD9NduFwx+GMjnrxor+kowgFFHIigfEoYvXt44+1tOo3j8lAeDKrVj003Z8tetptwuWKg\nlGJgrT5sjZ/HFatfsySc3byD87i8Tg4RPYpBdXqTq3YECxbIXrY7cLliADCgVi98akewcKHpJMKR\nxN6IZX/cIRIPtqF5c9NpHF+DEg3w8bvN5OgDpqMIO3DJYtAoqBFeftf4MeqQ6SjCgcw/OJ8ySaH0\n75MLT0/TaRyfUop+NXuyLT6Cy5dNpxG25pLFwEN50Kd6D7bFz+fSJdNphKMI3x/OxbVyiOhx9K/Z\nC986ESxYYDqJsDWXLAaQsRHnqScbschw9uZZ9pzfT67T7WjQwHQa59G0VFPwi2Nq1DHTUYSNuWwx\naFG6BWl+p5m68HfTUYQDWHBwAaUSujGgrw92mFvcZXh6eNK7eg+23Y7g4kXTaYQtuWwx8PLwome1\nMLYnzOfCBdNphGnhB8KJ+6W3HCLKhv41e5G3fgTz55tOImzJZYsBQL8avfBrIBuxu4u7FceOM7sp\ndK0DNWqYTuN8WpZuSYrfcaYtPGU6irAhly4Gbcq2ISnfIWYslGn83NmCQwsoHt+ZQf185RBRNnh7\netO9qoWdifM5f950GmErLl0McnnmIrRyV3YmLJCN2I0tPBTJhbU96NfPdBLn1bd6T/I1mk9EhOkk\nwlZcuhgA9K3eC//GcqjIXd1Musl/T6ynVHJHKmU5v554FO3Ltee2315mLJBvVa7K5YtBx/Iduem3\nnRnz5VIId7TitxUUTmhK/x7+pqM4NR8vH7qGdGZvykLOnDGdRtiCyxeD3N656VypI3uSIjkrpw7c\nTuThKK5vtdCrl+kkzq9vtV7kbxLBvHmmkwhbcPliANCnWi8CmsrxTneTmp5K1MHFFLseSkiI6TTO\nr3PFzlzLu4VZ86+ajiJswC2KQeeKnbnit55ZETJtkzvZdHoTPonBDOgSbDqKS8jjnYe25VtzIGUJ\np+QqU5fjFsXA38efZqWbsPf2cjne6UYWHookcY+F3r1NJ3EdPaqEUbh5pFyQ4YLcohhAxkYc2DJS\njne6Ca014XsiCYizUK2a6TSuo1ulblz0X0H4/CTTUYSVuU0xsIRYuJh/CfMWpJiOIuzg8OXDXL2V\nwMA2daSjmRUVzVuUmsWrs/vmGum742LcphgE+QdRsUhZdl5aT1yc6TTC1iIPReFx1EKf3lIJrK1H\n5TCKt4qUEYFdjNsUA4AeVbpTsm0kkZGmkwhbm70zitynLNSubTqJ6wmrHMblwlHMi0g3HUVYkVsV\ng7CQMK4GRjIvQuZ0dWUX4y9y8PJeBjZtLYeIbKBSoUoU9c/PltPbZAY0F+JWxaB60erkzaPYeGwP\nV+VSaZe16MhivE+3p39vH9NRXFb3KmEEtYskKsp0EmEtblUMlFJ0rxxGqfaRREebTiNsZea2KHL9\nbpEZzWyoe+XuxAdFSkdOF+JWxQAyjncmlpaN2FUlpiay/uxq+tTuIoeIbKhhyYakeF0mZs8xbtww\nnUZYg9sVg+bBzbnhcZLV205zUzoku5w1v6/B82ItBvcsbDqKS/NQHlgqhxLcPpJFi0ynEdbgdsXA\ny8OLrpW6ENw+iqVLTacR1jZ9axSexyw0bWo6iesLCwkjraLsZbsKtysGkLERU1k2YleTrtNZeiya\n0EpheLjllm1fbcu15ZzezYr1l7h923QakVNu+ZHpWKEjp9M3s+yX6yQkmE4jrGX72e2k3PLnya4V\nTUdxC75evrQv347gdotYtsx0GpFTblkM/HL58USZFpRsvZSVK02nEdYyZ1cUaQcttGxpOon7CAsJ\nw7uG7GW7ArcsBpCxEeeuLRuxKwnfE0XD/BZ8fU0ncR9dK3blt/TVLF5xmyQZu86puW0xsIRY+I1l\nRC9JJjnZdBqRUyeunSDu9jkGt2xsOopbKZSnEPVL1qNE81WsWmU6jcgJty0GxfyKUblICMUa/ZeY\nGNNpRE4tPBiNPtKVbl09TUdxO5ZKFvI3iJa9bCfntsUAMg4VBTSRQ0WuYOavUZS8ZaFkSdNJ3I8l\nxMJvXtFERqWTlmY6jcguty4GlhALJ3JFsWixRsvYdU7reuJ19lzZQt967U1HcUvlC5anqF9hCtbY\nyubNptOI7HLrYlC1SFV8fbzwKL6HvXtNpxHZtezYMnKdb0GPrn6mo7gtS4iFIs2jWLzYdBKRXW5d\nDJRSWCpZKNkmiiVLTKcR2fXzjig4LAPTmWQJsXA+v3yOnJlbFwPI2IivFJGN2FmlpKWw8sRSOpXr\nJr2ODWpYsiG3ucSJG78RG2s6jcgOt//4NA9uzqW039h+9IzMceCE1p9aj9fN8vTtJGeOTfJQHoRW\nCqVcZ/li5azcvhh4e3rTuWJnynVcJL2RnVDE/igSd4XRoYPpJMISYiGxtBQDZ+X2xQAyLjFNrxQp\nJ7+cjNaaeXsjqeVrIX9+02lE23JtOZ26nTWbrkhvZCckxQDoWL4jJ/U6lqy6RbrM8e009l/cT/zt\ndPq1qmE6igDyeOehTbnWBDZbyrp1ptOIxyXFAMjvm5+mwU3wqbqC7dtNpxGPKvJQFPqQhW7dZEoz\nRxEWEoZPbdnLdkZSDDJZQiwUaCTXSTuTObui8D9noVIl00nEH7pW7MpJzxUsWibHiZyNFINMoZVC\nic2zmMVLpD+9Mzh/6zxHrx6mV/0nZK5jBxLoF0iNYlW57LeWY8dMpxGPI8fFQCnVSSl1SCl1VCn1\n2n2W+Srz57uVUnVy2qYtlC5QmtIBJTl4axNxcabTiIdZdGQRec91xNI1l+ko4h6WEAuBT8i0ss4m\nR8VAKeUJjAc6AVWBAUqpKvcs0wWooLWuCDwDTMhJm7bUvXIYJdpEyqxNTmDe3ihu77TwxBOmk4h7\nWUIsXCqUMeaXcB453TNoCBzTWp/QWqcAs4Gwe5axANMAtNZbgAJKqcActmsTlhALN4rLeQNHdzvl\nNmtPxtAmuDM+PqbTiHtVKVwF/7w+rDu6i/h402nEo8ppMSgJnL7rcWzmcw9bJiiH7dpE3eJ1Ublu\nsWzbYVJSTKcR97Pq+Cr8b9WnZ+cA01FEFpRSdK9sIbBFFL/8YjqNeFReOfz9R90PvPcUX5a/N3r0\n6Dv3W7VqRatWrbIVKruUUnSvYiG6QRQbN74ic
+k6qIUHo7ixzUKXl0wnEfdjCbEwp/xLLF78Lt26\nmU7jWmJiYoixwYxcSudgIH+lVGNgtNa6U+bjN4B0rfXYu5b5DojRWs/OfHwIaKm1jrvntXROsljL\n0qNLGTVzDP1vr2PcONNpxL3SdTqFPypOyWUb2fvf8qbjiPtITU+l8NhA8k7fTeyBILniy4aUUmit\nc7yGc3qYaBtQUSlVRimVC+gHRN2zTBTwJNwpHtfuLQSOpHXZ1lzx3kPUqoumo4gsbD2zFY/EIvRq\nI4XAkXl5eBEa0oXE0lEcOGA6jXgUOSoGWutU4EVgOXAAmKO1PqiUGqWUGpW5zBLguFLqGPA98HwO\nM9uUr5cvHSq042zeJZw8aTqNuFfkoUg4FEbXrqaTiIcJqxxG3roycJ2zyNFhImtylMNEANN3T+et\nnxbyRrn5PPec6TTibhW/rMaVH3/k4q5GMn+Bg7uZdJPAcSWoG3OG9av9TcdxWY5ymMgldanYhUv+\nq4lakmg6irjLsSvHiLtxGUv9BlIInEA+n3w0C27G9msruH7ddBrxMPKRykLhPIWpXawWa0+tISHB\ndBrxh+jD0fifDyW0m2y2zqJHVQsFm0TJXCFOQD5V99GzmoX8DaOwwRVcIpsWHIji8gYL7dubTiIe\nVWilUK4RVxgnAAAeGElEQVQXXUL04lTTUcRDSDG4D0uIhdtB0SxeIhMcOIIrCVfYdnY7TYu3JV8+\n02nEoyqVvxRlAkoTvWuDzBXi4KQY3EelQpUo6JePBZt34CDntd3a0qNLKXKrNWFd8piOIh5TnxoW\nqBzFzp2mk4gHkWLwAH1qhnGzZCSHDplOIiIPR3Fti0UuKXVClhBL5rSy8q3KkUkxeICwEAve1eU6\nadOS05JZemQ5gde7UV76mjmd2sVqkyt3MhFr5VuVI5Ni8ACNgxqTlvsc81adMB3Fra09sZb8KVXo\n3t4hB7sVD6GUolc1C4dVJBelY7/DkmLwAJ4enoSGdGVHfDQ3bphO476iDkehD1pkwDMn1qOqhbx1\nZcIbRybF4CF6VrOQr55cJ22K1poFB6O4td1C06am04jsalm6JQl+Bwhf4rDDkrk9KQYP0b58e+ID\ntjAvWrpQmrAnbg/JCd50aVgVr5wOuC6M8fHyoX25Dqw+vZikJNNpRFakGDyEXy4/mpRswaLDy0iV\nfjN2F3k4knxnQ+nWVcZAdnZ9alrIXTtSOnI6KCkGj6B/7TC8qkWycaPpJO5nwcFIzsWE0amT6SQi\np7pU7EJ80V+IiLptOorIghSDR9CtUjcSg5axIErmwrSn09dPc/zySeoWbk6hQqbTiJwqmLsgtYrW\nY/6u1dKR0wFJMXgEJfKVoHxABeZuXicbsR1FHY6i2M0uhHaVkwWuol9tCynloti1y3QScS8pBo+o\nX20LN4pHSW9kO4o8HMmVjWFySakLCQuxkFoumoVRMlCRo5Fi8IjCKlvwqBJFZKTsGtjD9cTrbDy1\nmTznOlK1quk0wlrKFyxPUb9CzFm31XQUcQ8pBo+oRtEa+OZOZ/aa/aajuIVlx5ZRIrUFPbr6yWTq\nLqZf7TBO5Y4iNtZ0EnE3KQaPSClFnxphHNJRxEm/GZuLPBxJ0u4wunc3nURYW/cqFnLVjCIqynQS\ncTcpBo8ho0t9JIsWmU7i2lLSUlhyZBk3t3ejeXPTaYS1NSzZEPJcYvby30xHEXeRYvAYnij9BIl+\nR5i75JzpKC5t7cm1FEiriKV1Cel17II8lAdhlbux5ZqM+eVIpBg8Bm9PbzqV78Ta84u4Lf1mbCby\nUCSex+QQkSvrXT2MvPUiWbHCdBLxBykGj6l3DQt+daNYtcp0EteUMTBdJOfXhtGhg+k0wlbalmtL\nQoHtzI2+YjqKyCTF4DF1qtCJW4XXMi8q3nQUl7Tr/C5SEnPRvnZV8sgMly4rj3cenijVmsVHlpKc\nbDqNACkGjy0gdwB1izUgcu8qGbjOBiIPR+J/Lowe3eV6UlfXt5YF31pRMnCdg5BikA1/DFy3fr3p\nJK5nwcFIzq6RXsfuoFulbiQUX0H4fNk1cARSDLIhtFIoSaUXER6RZjqKSzl57SQnrsRSv1hTGZjO\nDQT6BVK5cBXmbYshTT5KxkkxyIayAWUpmb8YczdsIV2GWLGaqMNRFL3WlZ5hcj2pu+hby4Jn1Sg2\nbTKdREgxyKY+NS3oSlFslSFWrGbhoUgu/DeMsDDTSYS9WEIspJaPImK+jPllmhSDbLKEWFCVo5g/\n33QS13Al4QpbTv9KWd2BMmVMpxH2UqVwFfzz5mJOzG4ZHt4wKQbZVL9EffC9xuwVR2UjtoLow9EE\n3m5L3+55TUcRdqSUold1C0llI9mxw3Qa9ybFIJs8lAc9qoVyq2Q0u3ebTuP85h2I4OrGnvTsaTqJ\nsLfulcPwri572aZJMciBsJCMgetkI86Zm0k3WXM8hqLXulG5suk0wt6aBTcjwecEc5bKmNYmSTHI\ngTZl23DFZydzoi+bjuLUlhxdQmBSM/qEFjAdRRjg5eFFaOUuXC4czcGDptO4LykGOZDbOzftK7Tl\nQv4lMh1mDsw/OJ+bv/akVy/TSYQpYSEW/BvIoSKTpBjkUFiIhUJN5VBRdiWkJLDkyDLynAqjVi3T\naYQpnSp04lLuDYRH3jQdxW1JMcih0JBQzudZybxIGdM6O1YeX0mhlDr07VpUprd0Y/l88tG8dFN+\n91zGiROm07gnKQY5VDhPYRoHN+SYWiIbcTZEHIwgeVcvuYpI0LtqLwo9MY8FC0wncU9SDKygb7U+\nFG4pG/HjSk5LJvJgNGn7e9Cokek0wrQeVXoQl28ZcxfIXrYJUgysoEflHlzIt4y5CxJMR3EqMSdi\n8E+pRO8OQXjIluj2CucpTONSDdmTsIRzMrOs3clH0AqK5C1Cw6D67Lm9VDbixxBxIAIO9qRHD9NJ\nhKPoV70PhVuGExlpOon7kWJgJX2r96ZIy3AWLjSdxDmkpacx/8BCbmzpScuWptMIR9Gjcg8u5V/G\nnPlyqMjepBhYSc8qPblYYKkcKnpE60+tJ1dKMbo/UQFvb9NphKPI2MtuwNYrS7lwwXQa9yLFwEqK\n5i1K/ZJ12XJ5GZelQ/JDzdk/B59jfeUqIvE/BtTsS+GW4UREmE7iXqQYWFH/Gn0o3GKeHO98iNT0\nVObtj+DiL/1o3950GuFoelTuwaUCy/h5nuxl25MUAyvqWaUnVwotYdbcRNNRHFrMiRhyp5aiS+MK\n5M5tOo1wNEXyFqFRqfpsvy4XZNiTFAMrCvQLpF7J2my6sJy4ONNpHNecfXPwOtSPAQNMJxGOql/1\nPhRpNZd580wncR/ZLgZKqYJKqZVKqSNKqRVKqSyHnFRKnVBK7VFK7VRKufwkkf1q9KFo63DCw00n\ncUzJaclEHFjApbV96dzZdBrhqHpUyThUNCtcDhXZS072DF4HVmqtKwGrMx9nRQOttNZ1tNYNc9Ce\nU+hZ
pScXAxYzc44cKsrKquOr8E+pRK+2pfHxMZ1GOKqieYvSIKge+xKXEivTHNhFToqBBZiWeX8a\n0P0By7rNEGTF/IpRv2Rt9iUtlbGKsjBn/xzS98ohIvFw/av3pUirubKXbSc5KQaBWus/jozHAYH3\nWU4Dq5RS25RSI3PQntMYXGsQRdrMZPZs00kcS2JqIgsPRJGwrQ+tW5tOIxxdr6q9iPNfyow5Mqy1\nPXg96IdKqZVAsSx+9H93P9Baa6XU/aaFb6a1PqeUKgKsVEod0lqvy2rB0aNH37nfqlUrWrVq9aB4\nDqtXlV78P7+X+Sn8Oq+/nt90HIex/NhyAlJqEtq1BF4P3PKEyBirqFXZFmz0XciBA0OoWtV0Isew\ncvVKnv76aQbXGIy3p/V6bD7wI6m1vu9V4EqpOKVUMa31eaVUcSDL/oJa63OZ/15USi0AGgIPLQbO\nLCB3AO0qtOG/+eezd+8watQwncgxzN43m6Rt/RnwhukkwlkMrjmIg09MY9q0IYwdazqNY0gMSqR0\nWGn+PezfALz33ntWed2cHCaKAp7KvP8U8D+j8iil8iil8mXezwt0APbmoE2nMbjmIPK3mMmUKaaT\nOIb45HgWHV6K7++9aNLEdBrhLCwhFi75bmbqvDhSU02ncQyz9s7C78QAEqx8oVVOisHHQHul1BGg\nTeZjlFIllFKLM5cpBqxTSu0CtgCLtNYrchLYWXSt2JUrvtuZNv8cycmm05i34NACCt5qxtMDZEYz\n8ejy5spLWJVQfOvNYdUq02nMu5F0g0WHl3Issi++vtZ97WwXA631Fa11O611Ja11B631tcznz2qt\nu2beP661rp15q661/shawR1dbu/c9KzanYDms1m0yHQa86bunMHlX4bw5JOmkwhnM6jGIFStmUyb\n9vBlXV3EgQgK3mjFiAGFrP6lSnog29DA6gNJrzaLH380ncSsczfPsenUVpoUDKNUKdNphLNpV64d\nt71PEL3xKNeumU5j1pQd07m0xjZfqqQY2FCbsm247R3LugNHOHvWdBpzZu2dRf6zPXj6KRmISDw+\nLw8v+tfoR3DXWW59ufap66fYcWYP7YK7Uby49V9fioENeXp4MqB6fyr0mMn06abTmDN52wziNw4h\nLMx0EuGsBtUYxI3SM/l2gkbf7yJ2Fzdzz0zynOjDcyNt03VfioGNDao5iLjAn5j8Y7pbbsR74/Zy\n5soVnmrV0uonvIT7aFiyIb6507maexubNplOY39aayZunYHH3iE2G/ZdioGN1StejwJ+uUktsY61\na02nsb+pO2eQunMQLzwvm5rIPqUUT9Z6ktKW6UyYYDqN/W0/t53L15J4PrQpnp62aUM+oTamlGJ4\nneEU6TCFiRNNp7GvtPQ0pmybSU09hJAQ02mEs3uq1lMc9PyZqCWJXLpkOo19/bh9BsnbBzN8uO2u\ny5ZiYAeDaw7miEcki1bc5MoV02nsZ83va0i+UozXhss4AiLnShcoTb0SdanVb6FbdeZMSUth5q7Z\nNPIdTFCQ7dqRYmAHRfMWpVWZllTpPZcZM0ynsZ9P10zGe/9wunUznUS4iuF1hpNU9UcmTIC0NNNp\n7GPRkUVwtSIvDqxo03akGNjJ8DrDia+UcajIHU4kX7p9iZjYZbz4xEAZlE5YTffK3fnt9g7yljzJ\nsmWm09jHVxsmkf7rSEJDbduOFAM76VyhMxfTjnHL5zCbN5tOY3tTtv8Eh0MZ9VSA6SjChfh6+dK/\nen8q9JrmFieST18/zebYTQyp28fmk0FJMbATb09vhtQcQtmeU13+RLLWmq83TKJa8tM2PcYp3NPw\nOsPZqaewbn06F7IcK9l1TNk5Fa9D/XlmWB6btyXFwI6G1RnGwVzTmL8wlRs3TKexnS1ntnDlehIv\ndH3CdBThguoUq0OB3Plp0CeGOXNMp7GddJ3Ot5snU+L809Subfv2pBjYUdUiVSlbsDRVwxYza5bp\nNLYzftMkUn99mj59ZHhSYX1/XK6dVmsyM2eaTmM7q4+vJvl6QV7oWdcu7UkxsLPn6z9PUs0JTJ1q\nOolt3Ey6yfyDEXQMfAp/f9NphKsaXHMwu+KXcDzuAkePmk5jGxO2TCJhw9MMHGif9qQY2Fmfan04\nlbqd49eOcfCg6TTW9/O+n/E934pnBmY1W6oQ1lEwd0F6VO5Bxb4/uuRe9oX4Cyw/toL2xQZSuLB9\n2pRiYGe+Xr4Mqz2MMr2/c7nx2bXWfL5+PHrLC3ToYDqNcHUvNHiB3wt9x4yZaS53ufbE7RPJe7I3\nzw4tYLc2pRgYMKreKI7lnca0mQkuNZXfulPruHQ1mSHN2+JtvXm6hchSvRL1KBUQSGKpJaxfbzqN\n9aSmp/L15u9Q216gY0f7tSvFwIDyBcvTOLgBuRvMZYULTQL69ZbxpG16keHD5MSxsI8XGr6AX+tv\n+P5700msJ/JQJB43yjAqrLbNBqXLihQDQ55v8Dy6/rcu0+fgzI0zLDuyigrxT9rlMjghAPpW68vl\nXDuIXHeMy5dNp7GOrzaP5+bqFxk+3L7tSjEwpHOFzujcF1hzeCunTplOk3M/bP+BQmcH8uJIuYRI\n2I+vly/D6wyjVI8JLnEObt+Ffew5e5jG+XtQpox925ZiYIinhyf/aPx3Art/7vS7uEmpSUz49Qeu\nrXyevn1NpxHu5rkGz3GmyFS+nXzD6U8kj986Hv+jo3hmRC67ty3FwKARdUdwId9Kfph7gqQk02my\n76c9P5EvvjbDulUlt0xzLOysTIEydA5pT3zlSU59Du5C/AVm7ZlD6qZn6dHD/u1LMTDI38efZ+qP\nIHfr/xAebjpN9qTrdMZu+IRLka/y/POm0wh39a+m/yK5zpeM+yzFdJRs+2brNxS73Jd/PB1oZKRf\nKQaG/b3R37kaPI2Pv7zmlLu4i44sIvG6H23Lt6KibYdbF+K+6peoT42S5dmZFM6uXabTPL745Hi+\n2TqBuAUv8/TTZjJIMTAsyD+I7lW7cqXsDyxZYjrN4xu7fhyJq1/l1VfkclJh1ivN/kXutp/wyafO\n963qx50/UvBWC4ZZKlGwoJkMUgwcwMtNXyax9n/48ONEp9o72Hh6I8fizlIprSeNG5tOI9xd54qd\n8Q9IJnrfak6cMJ3m0aWmp/LJhs+Ii3iVl14yl0OKgQOoXaw2TcvW5Zj/JNatM53m0X28/mN8drzM\nq/+SqcyEeR7KgzeeeI0Clg/49DPn+VY1Z98cPG8F071BI4KDzeWQYuAgRrd6l9TGH/PBR4mmozyS\nbWe3senEDnIfHCFzHAuHMbDGQLwLnmPa2l+cYuKbtPQ03l/7Adej3+Zf/zKbRYqBg6hfoj5Nytbm\n17TJbN9uOs3DjY4ZTdEjr/PKP33xkK1IOAgvDy9Gt34bv26j+eprx987+Hnfz6TfKkKjIu2oUcNs\nFvkYO5D3Wr+Lav4x/x7r2J0Otp7ZyrbY3Vxe+TSDB5tOI8RfD
agxgNyF4/g6eg03b5pOc3+p6am8\nt/Y9Epe9x2uvmr8AQ4qBA2lQsgGNytRk5eVJHDpkOs39jY4ZTYnf3uDlf/ji62s6jRB/5eXhxfvt\n3iZXx3f5/nvH3TuYuWcmPkklCbzdmpYtTaeRYuBwPmr/ITzxIR9+4piTJK89sZZdZw5wdtEIXnjB\ndBohsjag+gD8il7mo3lLHLJ3f2JqIqPXjub24vd5522FMr9jIMXA0dQpXoeuldsTcX6cww1gl67T\neWnFS5Q8+BGvv+JDnjymEwmRNU8PT74O/YSklq8weYrjTRry1ZavKJRSi6IJTxAaajpNBikGDmhc\npw+hwQTe+CjWdJS/mLlnJkkJXpxZ0Z9Ro0ynEeLBulbsSpVSxXlj3kSHOndwMf4i4zaM48LMcYwZ\ng0PsFYAUA4cUnD+Y5xqMYv61txyma/3tlNv835r/w2PF57w3WsmAdMLhKaWY2Pszkpu8x3tjr5uO\nc8fomNHUYBAVC1aiTRvTaf4kxcBBjW73Oj5VVzL0nQ0O0St57PqxFE9vhPf5ZnafdEOI7KpdrDaW\nKl0Yv+89hzjsuiduD3P2zWXv1+/wxRem0/yVFAMH5e/jz3fdv+BQhVHMmpNsNMuhS4cY/+s3nPrh\nC776CrtOxSdETo0PG4tX7Zk8+dp2o1+s0nU6z0Q/Q+Wz/2ZI70LUrGkuS1akGDiwftX7UKdcMM9O\n+5xLl8xk0Frz7KJnqXnlHdo1DKJZMzM5hMiuInmL8GW3cWwOHMmccHMnk7/f9j0J8V78Fv40o0cb\ni3FfUgwcmFKKWYO+IbXhpzz1z2NGMkzaMYnzV25xaMYLfPmlkQhC5NiIek9SvXwAz0z+j5G5kk9f\nP807v7zD9Znf89V/PMif3/4ZHkaKgYMrG1CWD9q9zZoCg5k1x74Tdxy7cow3Vr/J7ZnT+O5bTwoV\nsmvzQliNUorZg74ntfHH9H1xv10PF6XrdJ5a+BTlL/2T5pWq0aeP/dp+HFIMnMBLzf9G7SoFGDnj\nQ7v1TE5NT2XI/CGUOv42netXIyzMPu0KYSsVClbg864fs6nEAMZ9br8BIT/f9DnnL6Rwdu5rjB9v\nt2Yfm9KOcKkKoJTSjpLFEZ27eY7KX9Yh/6rZ7Ituhb+/bdt7Y9UbLNiygzzzl7Jxg4cMOyFcgtaa\nrtP6EhNdjBX//JrmzW3b3pbYLXT5KRT9w1ZWzC1D/frWb0MphdY6x70VZM/ASRTPV5yIQT9xueUA\nLE+eINmGFxjN3T+XH7fO5trkmcyPkEIgXIdSipn9fiCg4TK6/t8Um+5pn7t5jp6ze5Nr2SQ+f8c2\nhcCapBg4kXbl2vF+x9fYGRLGwKG3SEuzfhs7z+3kmcgXSP5pPovDC1OmjPXbEMKkgNwBrB4RDe1e\np/VTGzh50vptJKYmEjarJ+m/juL5thaGDrV+G9YmxcDJvNT0H/Ru0og1gRb6Dkqw6iBcRy4fod2U\nrqRHTSBifB3q1bPeawvhSCoXrszcAdOJ79qLxmF7OHrUeq+dkpZC2E99OfprWQYE/R9vvWW917Yl\nKQZORinFD5YJtG9SjA1BPenYJYmLF3P+ukcvH6PJtx1IXv4Byz7v7VDd5IWwhY4VOjKp11ck9OpI\nk7B9rFmT89dMTkvGMnUI6zek8/dS0/jsU8cYkfRRZLsYKKX6KKX2K6XSlFJ1H7BcJ6XUIaXUUaXU\na9ltT/zJ08OTn3pN44lG/vzWvD21Gl9h9ersv17M4R3U+vIJfLb+HzsmjaBpU+tlFcKR9a3Wl2+7\nf0b64Hb0fnkd779Pts/H3Uy6RZ1PQlnz3wT+0yyc9971dppCADnbM9gL9AD+e78FlFKewHigE1AV\nGKCUqpKDNt1CTEzMQ5fx9vRmdu+f6d+sER7PNGbwK9sZOBB+++3R20lO1gz76kfaTulIy/jxHPl5\nJBUrZj+3LTzKunAXsi7+ZM11MbDGQGb3m45H/17M/X0CdetpFi/msfoi/Lx6H4FvNeTCkdJs/VcE\nTw91vpEcs10MtNaHtNZHHrJYQ+CY1vqE1joFmA3IFesP8agbuofy4JMOn/BJl/dI7d+Z2Epv07D5\nTXr1gnnz4EYW8+OkpcGePfC3d45T4NnuhMd+zk/t1rL08574+Vn3fViD/AH8k6yLP1l7XXQo34EN\nI9aRu+mPeA3txD/HHKJOHfjii4wvWFkVhvPn4duJCZR56gMGr2xN35KvcX7iD9Sq4WXVbPZi69Ql\ngdN3PY4FGtm4TbczoMYAWpRuwRur3+DwSxVJVCP5YsZgnnyqEsWLKYoVA19fuHglmd+SNuPZYBIp\nZRbzdMdX+LTXXHy8fEy/BSGMCykcwsbhG/li8xd8kt6CoDwdWLp3KJ+0bE1KkhdlykD+/HArXnP8\n6m/cKD0L1eB7atVqwsohv1KxSBnTbyFHHlgMlFIrgWJZ/OhNrXX0I7y+9CKzkyD/IGb0mMG+C/uY\nvGMycz3a4Nc8hQJ5KpGSnodLyVc5k3iY8gEVeLL2QIbX+ZKCuQuaji2EQ/H29ObVZq8yqt4opuya\nwsxcb3I9+ADl81fGk0LEJSdyPuk3PD1gaFULzzdYSs1ABxt+NJty3ANZKfUL8LLWekcWP2sMjNZa\nd8p8/AaQrrUem8WyUjiEECIbrNED2VqHie4XZBtQUSlVBjgL9AMGZLWgNd6MEEKI7MnJpaU9lFKn\ngcbAYqXU0sznSyilFgNorVOBF4HlwAFgjtb6YM5jCyGEsCaHGahOCCGEOcZ7ILtbpzSlVCml1C+Z\nHfb2KaX+nvl8QaXUSqXUEaXUCqVUgbt+543M9XNIKdXBXHrbUEp5KqV2KqWiMx+75bpQShVQSs1T\nSh1USh1QSjVy43Xxz8zPx16l1CyllI+7rAul1I9KqTil1N67nnvs966Uqpe5/o4qpf7z0Ia11sZu\ngCdwDCgDeAO7gComM9nhPRcDamfe9wMOA1WAccCrmc+/Bnyceb9q5nrxzlxPxwAP0+/DyuvkJWAm\nEJX52C3XBTANGJ553wvI747rgoxL0o8DPpmP5wBPucu6AFoAdYC9dz33OO/9jyM+W4GGmfeXAJ0e\n1K7pPQO365SmtT6vtd6Vef8WcJCMjd9Cxh8DMv/tnnk/DPhZa52itT5Bxn92Q7uGtiGlVBDQBZjE\nnxciuN26UErlB1porX+EjPNtWuvruOG6yOQF5FFKeQF5yLgAxS3WhdZ6HXD1nqcf5703UkoVB/Jp\nrbdmLjf9rt/JkulikFWntJKGsthd5lVWdYAtQKDWOi7zR3FAYOb9EmSslz+42jr6AngFSL/rOXdc\nF2WBi0qpKUqpHUqpiUqpvLjhutBanwE+A06RUQSuaa1X4obr4i6P+97vff4MD1knpouB2569Vkr5\nARHAP7TWN+/+mc7Yr3vQunGJ9aaU6gZc0Frv5D6XJ7vLuiDjm3Bd4FutdV0gHnj97gXcZV0opQLI\n+CZchow/an5KqcF3
L+Mu6yIrj/Des8V0MTgDlLrrcSn+Ws1cklLKm4xCMENrvTDz6TilVLHMnxcH\nLmQ+f+86Csp8zhU0BSxKqd+Bn4E2SqkZuOe6iAVitda/Zj6eR0ZxOO+G66Id8LvW+rLOuDx9PtAE\n91wXf3icz0Rs5vNB9zz/wHViuhjc6ZSmlMpFRqe0KMOZbEoppYDJwAGt9Zd3/SiKjJNkZP678K7n\n+yulcimlygIVyTgx5PS01m9qrUtprcsC/YE1WushuOe6OA+cVkpVynyqHbAfiMbN1gVwEmislMqd\n+XlpR0Y/JXdcF394rM9E5vZ0I/OKNAUMuet3suYAZ847k3FFzTHgDdN57PB+m5NxfHwXsDPz1gko\nCKwCjgArgAJ3/c6bmevnENDR9Huw0XppyZ9XE7nlugBqAb8Cu8n4NpzfjdfFaDIurthLxglTb3dZ\nF2TsJZ8Fksk4pzosO+8dqJe5/o4BXz2sXel0JoQQwvhhIiGEEA5AioEQQggpBkIIIaQYCCGEQIqB\nEEIIpBgIIYRAioEQQgikGAghhAD+P4av4eGhyXqGAAAAAElFTkSuQmCC\n",
243 | "text/plain": [
244 | ""
245 | ]
246 | },
247 | "metadata": {},
248 | "output_type": "display_data"
249 | }
250 | ],
251 | "source": [
252 | "plot_predicted, = plt.plot(predicted, label='predicted')\n",
253 | "plot_test, = plt.plot(y['test'], label='test')\n",
254 | "plt.legend(handles=[plot_predicted, plot_test])"
255 | ]
256 | }
257 | ],
258 | "metadata": {
259 | "kernelspec": {
260 | "display_name": "Python 3",
261 | "language": "python",
262 | "name": "python3"
263 | },
264 | "language_info": {
265 | "codemirror_mode": {
266 | "name": "ipython",
267 | "version": 3.0
268 | },
269 | "file_extension": ".py",
270 | "mimetype": "text/x-python",
271 | "name": "python",
272 | "nbconvert_exporter": "python",
273 | "pygments_lexer": "ipython3",
274 | "version": "3.4.4"
275 | }
276 | },
277 | "nbformat": 4,
278 | "nbformat_minor": 0
279 | }
--------------------------------------------------------------------------------
/9.RNN/lstm-for-sine-wave.py:
--------------------------------------------------------------------------------
1 |
2 | # coding: utf-8
3 |
4 | # In[13]:
5 |
6 | # get_ipython().magic('matplotlib inline')
7 | import numpy as np
8 | from matplotlib import pyplot as plt
9 |
10 | from tensorflow.contrib import learn
11 | from sklearn.metrics import mean_squared_error, mean_absolute_error
12 | from lstm_predictor import generate_data, lstm_model
13 |
14 |
15 | # ## Libraries
16 | #
17 | # - numpy: package for scientific computing
18 | # - pandas: data structures and data analysis tools
19 | # - tensorflow: open source software library for machine intelligence
20 | # - matplotlib: 2D plotting library
21 | #
22 | #
23 | # - **learn**: Simplified interface for TensorFlow (mimicking Scikit Learn) for Deep Learning
24 | # - mse: "mean squared error" as evaluation metric
25 | # - **lstm_predictor**: our lstm class
26 | #
27 |
28 | # In[14]:
29 |
30 | LOG_DIR = './ops_logs'
31 | TIMESTEPS = 5
32 | RNN_LAYERS = [{'steps': TIMESTEPS}]
33 | DENSE_LAYERS = [10, 10]
34 | TRAINING_STEPS = 100000
35 | BATCH_SIZE = 100
36 | PRINT_STEPS = TRAINING_STEPS / 100
37 |
38 |
39 | # ## Parameter definitions
40 | #
41 | # - LOG_DIR: log file
42 | # - TIMESTEPS: RNN time steps
43 | # - RNN_LAYERS: RNN layer information
44 | # - DENSE_LAYERS: size of the DNN; [10, 10] means two dense layers with 10 hidden units each
45 | # - TRAINING_STEPS: number of training steps
46 | # - BATCH_SIZE: mini-batch size
47 | # - PRINT_STEPS: interval for printing intermediate progress (every 1% of the total steps)
48 |
49 | # In[15]:
50 |
51 | regressor = learn.TensorFlowEstimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS),
52 | n_classes=0,
53 | verbose=1,
54 | steps=TRAINING_STEPS,
55 | optimizer='Adagrad',
56 | learning_rate=0.03,
57 | batch_size=BATCH_SIZE)
58 |
59 |
60 | # ## Create a regressor with TF Learn
61 | #
62 | # : Build the model used for prediction, using the TensorFlowEstimator provided by the TF Learn library.
63 | #
64 | # **Parameters**:
65 | #
66 | # - model_fn: model used for training and prediction
67 | # - n_classes: number of label classes (0: prediction, 1 or more: classification; to be verified)
68 | # - verbose: progress output
69 | # - steps: number of training steps
70 | # - optimizer: optimization method ("SGD", "Adam", "Adagrad")
71 | # - learning_rate: learning rate
72 | # - batch_size: batch size
73 | #
74 | #
75 |
76 | # In[ ]:
77 |
78 | X, y = generate_data(np.sin, np.linspace(0, 100, 10000), TIMESTEPS, seperate=False)
79 | # create a lstm instance and validation monitor
80 |
81 | validation_monitor = learn.monitors.ValidationMonitor(X['val'], y['val'],
82 | every_n_steps=PRINT_STEPS,
83 | early_stopping_rounds=1000)
84 |
85 |
86 | # ## Generate a dataset
87 | #
88 | # 1. generate_data: builds the training data from a given function.
89 | # - fct: function used to generate the data
90 | # - x: positions at which the function values are observed
91 | # - time_steps: length of the observation window
92 | # - seperate: check multimodal
93 | # 1. ValidationMonitor: monitors the validation set as training proceeds
94 | # - x
95 | # - y
96 | # - every_n_steps: interval between intermediate reports
97 | # - early_stopping_rounds
98 |
99 | # In[16]:
100 |
101 | regressor.fit(X['train'], y['train'], monitors=[validation_monitor], logdir=LOG_DIR)
102 |
103 |
104 | # ## Train and validation
105 | #
106 | # - fit: train on the training data, recording progress via the validation monitor and the log directory
107 | #
108 | #
109 |
110 | # In[17]:
111 |
112 | predicted = regressor.predict(X['test'])
113 | mse = mean_squared_error(y['test'], predicted)
114 | print ("Error: %f" % mse)
115 |
116 |
117 | # ## Evaluate using test set
118 | #
119 | # Evaluate our hypothesis using test set. The mean squared error (MSE) is used for the evaluation metric.
120 | #
121 | #
122 |
123 | # In[18]:
124 |
125 | plot_predicted, = plt.plot(predicted, label='predicted')
126 | plot_test, = plt.plot(y['test'], label='test')
127 | plt.legend(handles=[plot_predicted, plot_test])
128 |
129 |
130 |
131 | # ## Plotting
132 | #
133 | # Then, plot both predicted values and original values from test set.
134 |
--------------------------------------------------------------------------------
/9.RNN/lstm_predictor.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import tensorflow as tf
4 | from tensorflow.python.framework import dtypes
5 | from tensorflow.contrib import learn
6 |
7 | import logging
8 | logging.basicConfig(level=logging.INFO)
9 |
10 | def x_sin(x):
11 | return x * np.sin(x)
12 |
13 |
14 | def sin_cos(x):
15 | return pd.DataFrame(dict(a=np.sin(x), b=np.cos(x)), index=x)
16 |
17 |
18 | def rnn_data(data, time_steps, labels=False):
19 | """
20 | creates new data frame based on previous observation
21 | * example:
22 | l = [1, 2, 3, 4, 5]
23 | time_steps = 2
24 | -> labels == False [[1, 2], [2, 3], [3, 4]]
25 | -> labels == True [3, 4, 5]
26 | """
27 | rnn_df = []
28 | for i in range(len(data) - time_steps):
29 | if labels:
30 | try:
31 | rnn_df.append(data.iloc[i + time_steps].as_matrix())
32 | except AttributeError:
33 | rnn_df.append(data.iloc[i + time_steps])
34 | else:
35 | data_ = data.iloc[i: i + time_steps].as_matrix()
36 | rnn_df.append(data_ if len(data_.shape) > 1 else [[i] for i in data_])
37 | return np.array(rnn_df)
38 |
39 |
40 | def split_data(data, val_size=0.1, test_size=0.1):
41 | """
42 | splits data to training, validation and testing parts
43 | """
44 | ntest = int(round(len(data) * (1 - test_size)))
45 | nval = int(round(len(data.iloc[:ntest]) * (1 - val_size)))
46 |
47 | df_train, df_val, df_test = data.iloc[:nval], data.iloc[nval:ntest], data.iloc[ntest:]
48 |
49 | return df_train, df_val, df_test
50 |
51 |
52 | def prepare_data(data, time_steps, labels=False, val_size=0.05, test_size=0.05):
53 | """
54 | Given the number of `time_steps` and some data,
55 | prepares training, validation and test data for an lstm cell.
56 | """
57 | df_train, df_val, df_test = split_data(data, val_size, test_size)
58 | return (rnn_data(df_train, time_steps, labels=labels),
59 | rnn_data(df_val, time_steps, labels=labels),
60 | rnn_data(df_test, time_steps, labels=labels))
61 |
62 |
63 | def generate_data(fct, x, time_steps, seperate=False):
64 | """generates data with based on a function fct"""
65 | data = fct(x)
66 | if not isinstance(data, pd.DataFrame):
67 | data = pd.DataFrame(data)
68 | train_x, val_x, test_x = prepare_data(data['a'] if seperate else data, time_steps)
69 | train_y, val_y, test_y = prepare_data(data['b'] if seperate else data, time_steps, labels=True)
70 | return dict(train=train_x, val=val_x, test=test_x), dict(train=train_y, val=val_y, test=test_y)
71 |
72 | def load_csvdata(rawdata, time_steps, seperate=False):
73 | data = rawdata
74 | if not isinstance(data, pd.DataFrame):
75 | data = pd.DataFrame(data)
76 | train_x, val_x, test_x = prepare_data(data['a'] if seperate else data, time_steps)
77 | train_y, val_y, test_y = prepare_data(data['b'] if seperate else data, time_steps, labels=True)
78 | return dict(train=train_x, val=val_x, test=test_x), dict(train=train_y, val=val_y, test=test_y)
79 |
80 | def lstm_model(time_steps, rnn_layers, dense_layers=None):
81 | """
82 | Creates a deep model based on:
83 | * stacked lstm cells
84 | * optional dense layers
85 | :param time_steps: the number of time steps the model will be looking at.
86 | :param rnn_layers: list of int or dict
87 | * list of int: the steps used to instantiate the `BasicLSTMCell` cell
88 | * list of dict: [{steps: int, keep_prob: int}, ...]
89 | :param dense_layers: list of nodes for each layer
90 | :return: the model definition
91 | """
92 |
93 | def lstm_cells(layers):
94 | if isinstance(layers[0], dict):
95 | return [tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.BasicLSTMCell(layer['steps'],
96 | state_is_tuple=True),
97 | layer['keep_prob'])
98 | if layer.get('keep_prob') else tf.nn.rnn_cell.BasicLSTMCell(layer['steps'],
99 | state_is_tuple=True)
100 | for layer in layers]
101 | return [tf.nn.rnn_cell.BasicLSTMCell(steps, state_is_tuple=True) for steps in layers]
102 |
103 | def dnn_layers(input_layers, layers):
104 | if layers and isinstance(layers, dict):
105 | return learn.ops.dnn(input_layers,
106 | layers['layers'],
107 | activation=layers.get('activation'),
108 | dropout=layers.get('dropout'))
109 | elif layers:
110 | return learn.ops.dnn(input_layers, layers)
111 | else:
112 | return input_layers
113 |
114 | def _lstm_model(X, y):
115 | stacked_lstm = tf.nn.rnn_cell.MultiRNNCell(lstm_cells(rnn_layers), state_is_tuple=True)
116 | x_ = learn.ops.split_squeeze(1, time_steps, X)
117 | output, layers = tf.nn.rnn(stacked_lstm, x_, dtype=dtypes.float32)
118 | output = dnn_layers(output[-1], dense_layers)
119 | return learn.models.linear_regression(output, y)
120 |
121 | return _lstm_model
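122 |
123 | # A minimal usage sketch (hypothetical shapes) tying the helpers above together:
124 | #
125 | #   X, y = generate_data(np.sin, np.linspace(0, 100, 10000), time_steps=5)
126 | #   X['train'].shape   # (n_train, 5, 1): windows of 5 consecutive observations
127 | #   y['train'].shape   # (n_train, 1)   : the observation right after each window
128 | #   model_fn = lstm_model(time_steps=5, rnn_layers=[{'steps': 5}], dense_layers=[10, 10])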
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TensorFlowLecture
2 |
3 | Example code written while following various lectures and textbooks.
4 | Theoretical write-ups are available on my personal [blog](http://goodtogreate.tistory.com/category/Data%20Science/TensorFlow%20%28python%29).
5 |
6 | ## Development Environment ##
7 |
8 | - Ubuntu 16.04
9 | - Pycharm Pro and Jupyter Notebook
10 | - Python 3.5
11 | - TensorFlow 1.4
12 |
13 | ## Table of Contents ##
14 |
15 | ### NumPy and SciPy Code ###
16 |
17 | 0.1. [Fundamental_Neural_Network](https://github.com/leejaymin/TensorFlowLecture/tree/master/0.1.Fundamental_Neural_Network)
18 |
19 | ### TensorFlow Code ###
20 | 0. [Basic Example](https://github.com/leejaymin/TensorFlowLecture/tree/master/0.2.Basic)
21 | 1. [Linear Regression](https://github.com/leejaymin/TensorFlowLecture/tree/master/1.Linear%20Regression)
22 | 1. [Logistic Classification](https://github.com/leejaymin/TensorFlowLecture/tree/master/2.Logistic%20Classification)
23 | 1. [Multiple Perceptron for XOR Problem](https://github.com/leejaymin/TensorFlowLecture/tree/master/3.XOR)
24 | 1. [MNIST set](https://github.com/leejaymin/TensorFlowLecture/tree/master/4.MNIST)
25 | - [Softmax classification](https://github.com/leejaymin/TensorFlowLecture/blob/master/4.MNIST/MNIST_Tutorial_DNN.ipynb)
26 | - [DNN with ReLU](https://github.com/leejaymin/TensorFlowLecture/blob/master/4.MNIST/MNIST_Tutorial_DNN.ipynb)
27 | - [DNN with Dropout and xavier_init](https://github.com/leejaymin/TensorFlowLecture/blob/master/4.MNIST/MNIST_Tutorial_DNN.ipynb)
28 | 1. [CNN](https://github.com/leejaymin/TensorFlowLecture/tree/master/5.CNN)
29 | 1. [Early Stop and Index Shuffling](https://github.com/leejaymin/TensorFlowLecture/tree/master/6.Early%20Stop%20and%20Index%20Shuffling)
30 | 1. [TensorBoard](https://github.com/leejaymin/TensorFlowLecture/tree/master/7.TensorBoard)
31 | 1. [Save and Restore](https://github.com/leejaymin/TensorFlowLecture/tree/master/8.Save%20and%20Restore)
32 | 1. [RNN](https://github.com/leejaymin/TensorFlowLecture/tree/master/9.RNN)
33 | 1. [Transfer Learning](https://github.com/leejaymin/TensorFlowLecture/tree/master/10.Transfer%20Learning)
34 |
35 | ## References ##
36 | - Coursera, Machine Learning (Andrew Ng): [URL](https://www.coursera.org/learn/machine-learning/home/welcome)
37 | - Coursera homework code implemented in Python: [URL](https://github.com/kaleko/CourseraML)
38 | - Professor Sung Kim's lecture page: [URL](http://hunkim.github.io/ml/)
39 | - Official Google TensorFlow tutorials: [URL](https://www.tensorflow.org/get_started/)
40 | - Bay Area DL School Live Stream, Sherry Moore, tf-tutorial: [URL](https://github.com/sherrym/tf-tutorial)
41 | - TensorFlow Korea: [URL](https://tensorflowkorea.wordpress.com/)
42 | - Korean Cognitive Science Society deep learning tutorial: [URL](https://github.com/leejaymin/deeplearning_tutorial)
43 | - tgjeon, TensorFlow-Tutorials-for-Time-Series: [URL](https://github.com/tgjeon/TensorFlow-Tutorials-for-Time-Series)
44 |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------