├── .gitignore
├── README.md
├── lab1
│   ├── 01_hello_world.py
│   ├── 02_operations.py
│   └── 03_placeholder.py
├── lab2
│   ├── 01_linear_regression.py
│   └── 02_linear_regression_placeholder.py
├── lab3
│   ├── 01_show_cost.py
│   └── 02_hand_made_descent.py
├── lab4
│   ├── 01_multi-variable_linear_regression.py
│   ├── 02_with_matrix.py
│   ├── 03_edit_bias.py
│   ├── 04_loading_data_from_file.py
│   └── train.txt
├── lab5
│   ├── 01_logistic_regression.py
│   ├── 02_ask_to_ml.py
│   └── train.txt
└── lab6
    ├── 01_softmax_classification.py
    ├── 02_test_one-hot_encoding.py
    └── train.txt

/.gitignore:
--------------------------------------------------------------------------------
*.png
*.gz
!test_2.png
events.*
.idea/
/07 - NN/logs
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Study_TensorFlow

## TensorFlow Practice Code

Practice code for Professor Sung Kim's [Machine Learning / Deep Learning for Everyone](http://hunkim.github.io/ml) lectures.<br/>
[Official practice code](https://github.com/hunkim/DeepLearningZeroToAll)<br/>

### Lab List

[Lab 1. TensorFlow basics: constants, operations, placeholders](https://github.com/FuZer/Study_TensorFlow/tree/master/lab1)<br/>
[Lab 2. Linear regression](https://github.com/FuZer/Study_TensorFlow/tree/master/lab2)<br/>
[Lab 3. Plotting the cost and hand-made gradient descent](https://github.com/FuZer/Study_TensorFlow/tree/master/lab3)<br/>
[Lab 4. Multi-variable linear regression](https://github.com/FuZer/Study_TensorFlow/tree/master/lab4)<br/>
[Lab 5. Logistic regression](https://github.com/FuZer/Study_TensorFlow/tree/master/lab5)<br/>
[Lab 6. Softmax classification and one-hot encoding](https://github.com/FuZer/Study_TensorFlow/tree/master/lab6)<br/>
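
### Requirements

All scripts use the TensorFlow 1.x graph API (`tf.Session`, `tf.placeholder`, `tf.train.GradientDescentOptimizer`); lab 3 also needs matplotlib, and labs 4-6 need NumPy. On a TensorFlow 2.x install, a minimal sketch of a workaround (assuming the stock `tensorflow.compat.v1` module) is to replace each script's `import tensorflow as tf` with:

```python
# Sketch of a TF 2.x shim for this 1.x-style code; not part of the labs.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode, Session, and placeholder
```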

--------------------------------------------------------------------------------
/lab1/01_hello_world.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf

hello = tf.constant('Hello, TensorFlow!')

# Start a tf session
sess = tf.Session()

print(sess.run(hello))
--------------------------------------------------------------------------------
/lab1/02_operations.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf

# Start a tf session
sess = tf.Session()

a = tf.constant(2)
b = tf.constant(3)

c = a + b

# Printing the ops themselves: everything is an operation (a Tensor), not a value
print(a)
print(b)
print(c)

print(a + b)

# Print the results of running the operations
print(sess.run(a))
print(sess.run(b))
print(sess.run(c))
print(sess.run(a + b))
--------------------------------------------------------------------------------
/lab1/03_placeholder.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf

a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

add = tf.add(a, b)
mul = tf.multiply(a, b)

# Same op? No: `a + b` and `a * b` build separate nodes in the graph
print(add)
print(a + b)
print(mul)
print(a * b)

# Launch the default graph
with tf.Session() as sess:
    print(sess.run(add, feed_dict={a: 2, b: 3}))

    # It works with a prepared feed dict, too
    feed = {a: 3, b: 5}
    print(sess.run(mul, feed_dict=feed))
--------------------------------------------------------------------------------
/lab2/01_linear_regression.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf

x_data = [1, 2, 3]
y_data = [1, 2, 3]

W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

hypothesis = W * x_data + b

cost = tf.reduce_mean(tf.square(hypothesis - y_data))

a = tf.Variable(0.1)  # learning rate
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W), sess.run(b))
--------------------------------------------------------------------------------
/lab2/02_linear_regression_placeholder.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf

x_data = [1, 2, 3]
y_data = [1, 2, 3]

W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

hypothesis = W * X + b

cost = tf.reduce_mean(tf.square(hypothesis - Y))

a = tf.Variable(0.1)  # learning rate
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W), sess.run(b))

print(sess.run(hypothesis, feed_dict={X: 5}))
print(sess.run(hypothesis, feed_dict={X: 2.5}))
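# Expected behavior (not captured output): the data satisfy y == x exactly,
# so W should converge near 1 and b near 0, and the two predictions above
# should print values close to 5 and 2.5.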
--------------------------------------------------------------------------------
/lab3/01_show_cost.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf
from matplotlib import pyplot as plt

# Graph input
X = [1., 2., 3.]
Y = [1., 2., 3.]
m = n_samples = len(X)

# Model weight, fed through a placeholder so the cost can be evaluated at any W
W = tf.placeholder(tf.float32)

# Construct a linear model
hypothesis = tf.multiply(X, W)

# Cost function (mean squared error)
cost = tf.reduce_sum(tf.pow(hypothesis - Y, 2)) / m

init = tf.global_variables_initializer()

# For the plot
W_val = []
cost_val = []

# Launch the graph
sess = tf.Session()
sess.run(init)

# Sweep W from -3.0 to 4.9 and record the cost at each point
for i in range(-30, 50):
    print(i * 0.1, sess.run(cost, feed_dict={W: i * 0.1}))
    W_val.append(i * 0.1)
    cost_val.append(sess.run(cost, feed_dict={W: i * 0.1}))

plt.plot(W_val, cost_val, 'ro')
plt.ylabel('cost')
plt.xlabel('W')
plt.show()
--------------------------------------------------------------------------------
/lab3/02_hand_made_descent.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf

x_data = [1, 2, 3]
y_data = [1, 2, 3]

W = tf.Variable(tf.random_uniform([1], -10.0, 10.0))

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

hypothesis = W * X

cost = tf.reduce_mean(tf.square(hypothesis - Y))

# Hand-made gradient descent: W := W - lr * mean((W*X - Y) * X).
# The analytic gradient of the mean-squared cost carries a factor of 2,
# absorbed here into the learning rate.
lr = 0.1
descent = W - tf.multiply(lr, tf.reduce_mean(tf.multiply((tf.multiply(W, X) - Y), X)))
train = W.assign(descent)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))

print(sess.run(hypothesis, feed_dict={X: 5}))
print(sess.run(hypothesis, feed_dict={X: 2.5}))
--------------------------------------------------------------------------------
/lab4/01_multi-variable_linear_regression.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf

x1_data = [1, 0, 3, 0, 5]
x2_data = [0, 2, 0, 4, 0]
y_data = [1, 2, 3, 4, 5]

W1 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
W2 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

hypothesis = W1 * x1_data + W2 * x2_data + b

cost = tf.reduce_mean(tf.square(hypothesis - y_data))

learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W1), sess.run(W2), sess.run(b))
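# Expected behavior (not captured output): y_data == x1_data + x2_data
# elementwise, so W1 and W2 should converge near 1 and b near 0; the next
# script, 02_with_matrix.py, folds the separate weights into one matrix.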
--------------------------------------------------------------------------------
/lab4/02_with_matrix.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf

x_data = [[1., 0., 3., 0., 5.],
          [0., 2., 0., 4., 0.]]

y_data = [1, 2, 3, 4, 5]

W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

# Matrix form: W is [1, 2] and x_data is [2, 5], so the product is [1, 5]
hypothesis = tf.matmul(W, x_data) + b

cost = tf.reduce_mean(tf.square(hypothesis - y_data))

learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W), sess.run(b))
--------------------------------------------------------------------------------
/lab4/03_edit_bias.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import tensorflow as tf

# The first row of x_data is all ones, so the first weight in W acts as the bias
x_data = [[1., 1., 1., 1., 1.],
          [1., 0., 3., 0., 5.],
          [0., 2., 0., 4., 0.]]

y_data = [1, 2, 3, 4, 5]

W = tf.Variable(tf.random_uniform([1, 3], -1.0, 1.0))

hypothesis = tf.matmul(W, x_data)

cost = tf.reduce_mean(tf.square(hypothesis - y_data))

learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W))
--------------------------------------------------------------------------------
/lab4/04_loading_data_from_file.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import numpy as np
import tensorflow as tf

# unpack=True transposes the file, so each row of xy is one column of train.txt
xy = np.loadtxt('train.txt', unpack=True, dtype='float32')
x_data = xy[0:-1]  # x0 (bias), x1, x2
y_data = xy[-1]

W = tf.Variable(tf.random_uniform([1, 3], -1.0, 1.0))

hypothesis = tf.matmul(W, x_data)

cost = tf.reduce_mean(tf.square(hypothesis - y_data))

learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W))
--------------------------------------------------------------------------------
/lab4/train.txt:
--------------------------------------------------------------------------------
#x0 x1 x2 y
1 1 0 1
1 0 2 2
1 3 0 3
1 0 4 4
1 5 0 5
--------------------------------------------------------------------------------
/lab5/01_logistic_regression.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import numpy as np
import tensorflow as tf

xy = np.loadtxt('train.txt', unpack=True, dtype='float32')
x_data = xy[0:-1]
y_data = xy[-1]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
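# Shape note: X is fed the [3, m] matrix whose rows are x0 (bias), x1, x2,
# and Y is fed the corresponding row of m binary labels.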

W = tf.Variable(tf.random_uniform([1, len(x_data)], -1.0, 1.0))

h = tf.matmul(W, X)
hypothesis = tf.div(1., 1. + tf.exp(-h))  # sigmoid

# Binary cross-entropy cost
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))

learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})

    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
--------------------------------------------------------------------------------
/lab5/02_ask_to_ml.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import numpy as np
import tensorflow as tf

xy = np.loadtxt('train.txt', unpack=True, dtype='float32')
x_data = xy[0:-1]
y_data = xy[-1]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

W = tf.Variable(tf.random_uniform([1, len(x_data)], -1.0, 1.0))

h = tf.matmul(W, X)
hypothesis = tf.div(1., 1. + tf.exp(-h))  # sigmoid

# Binary cross-entropy cost
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))

learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})

    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))

print('---------------------------')

# Ask the model: each feed is one column [x0, x1, x2]; > 0.5 means class 1
print(sess.run(hypothesis, feed_dict={X: [[1], [2], [2]]}) > 0.5)
print(sess.run(hypothesis, feed_dict={X: [[1], [5], [5]]}) > 0.5)
# Two examples at once: columns (1, 4, 3) and (1, 3, 5)
print(sess.run(hypothesis, feed_dict={X: [[1, 1], [4, 3], [3, 5]]}) > 0.5)
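# Expected behavior (not captured output): in train.txt the point
# (x1, x2) = (2, 2) lies among the label-0 samples while (5, 5) is itself a
# label-1 sample, so the first print should be [[False]] and the second [[True]].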
--------------------------------------------------------------------------------
/lab5/train.txt:
--------------------------------------------------------------------------------
#x0 x1 x2 y
1 2 1 0
1 3 2 0
1 3 4 0
1 5 5 1
1 7 5 1
1 2 5 1
--------------------------------------------------------------------------------
/lab6/01_softmax_classification.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import numpy as np
import tensorflow as tf

xy = np.loadtxt('train.txt', unpack=True, dtype='float32')
x_data = np.transpose(xy[0:3])
y_data = np.transpose(xy[3:])

X = tf.placeholder("float", [None, 3])
Y = tf.placeholder("float", [None, 3])

W = tf.Variable(tf.zeros([3, 3]))

# Matrix shapes: X is [8, 3] (8 samples in train.txt), W is [3, 3]
hypothesis = tf.nn.softmax(tf.matmul(X, W))

learning_rate = 0.001

# Cross-entropy cost, summed over the 3 classes and averaged over samples
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), reduction_indices=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    for step in range(2001):
        sess.run(optimizer, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
--------------------------------------------------------------------------------
/lab6/02_test_one-hot_encoding.py:
--------------------------------------------------------------------------------
from __future__ import print_function

import numpy as np
import tensorflow as tf

xy = np.loadtxt('train.txt', unpack=True, dtype='float32')
x_data = np.transpose(xy[0:3])
y_data = np.transpose(xy[3:])

X = tf.placeholder("float", [None, 3])
Y = tf.placeholder("float", [None, 3])

W = tf.Variable(tf.zeros([3, 3]))

# Matrix shapes: X is [8, 3] (8 samples in train.txt), W is [3, 3]
hypothesis = tf.nn.softmax(tf.matmul(X, W))

learning_rate = 0.001

cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), reduction_indices=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    for step in range(2001):
        sess.run(optimizer, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))

    print('--------------------')

    # argmax over the softmax probabilities picks the predicted one-hot class
    a = sess.run(hypothesis, feed_dict={X: [[1, 11, 7]]})
    print(a, sess.run(tf.argmax(a, 1)))

    b = sess.run(hypothesis, feed_dict={X: [[1, 3, 4]]})
    print(b, sess.run(tf.argmax(b, 1)))

    c = sess.run(hypothesis, feed_dict={X: [[1, 1, 0]]})
    print(c, sess.run(tf.argmax(c, 1)))

    # All three test samples in one batch
    all = sess.run(hypothesis, feed_dict={X: [[1, 11, 7], [1, 3, 4], [1, 1, 0]]})
    print(all, sess.run(tf.argmax(all, 1)))
--------------------------------------------------------------------------------
/lab6/train.txt:
--------------------------------------------------------------------------------
# x0 x1 x2 y[A B C]
1 2 1 0 0 1
1 3 2 0 0 1
1 3 4 0 0 1
1 5 5 0 1 0
1 7 5 0 1 0
1 2 5 0 1 0
1 6 6 1 0 0
1 7 7 1 0 0
--------------------------------------------------------------------------------