├── fc1.zip
├── fc1
│   ├── mnist_backward.py
│   ├── mnist_forward.py
│   └── mnist_test.py
├── fc2.zip
├── fc2
│   ├── mnist_backward.py
│   ├── mnist_forward.py
│   └── mnist_test.py
├── fc3.zip
├── fc3
│   ├── mnist_app.py
│   ├── mnist_backward.py
│   ├── mnist_forward.py
│   ├── mnist_test.py
│   └── pic
│       ├── 0.png
│       ├── 1.png
│       ├── 2.png
│       ├── 3.png
│       ├── 4.png
│       ├── 5.png
│       ├── 6.png
│       ├── 7.png
│       ├── 8.png
│       └── 9.png
├── fc4.z01
├── fc4.z02
├── fc4.z03
├── fc4.zip
├── fc4
│   └── mnist_data_jpg
│       └── mnist_test_jpg_10000
│           ├── 10_0.jpg
│           ├── 11_6.jpg
│           ├── 12_9.jpg
│           ├── 13_0.jpg
│           ├── 14_1.jpg
│           ├── 15_5.jpg
│           ├── 16_9.jpg
│           ├── 17_7.jpg
│           ├── 18_3.jpg
│           ├── 19_4.jpg
│           ├── 1_2.jpg
│           ├── 20_9.jpg
│           ├── 21_6.jpg
│           ├── 22_6.jpg
│           ├── 23_5.jpg
│           ├── 24_4.jpg
│           ├── 25_0.jpg
│           ├── 26_7.jpg
│           ├── 27_4.jpg
│           ├── 28_0.jpg
│           ├── 29_1.jpg
│           ├── 2_1.jpg
│           ├── 30_3.jpg
│           ├── 31_1.jpg
│           ├── 32_3.jpg
│           ├── 33_4.jpg
│           ├── 34_7.jpg
│           ├── 35_2.jpg
│           ├── 36_7.jpg
│           ├── 37_1.jpg
│           ├── 38_2.jpg
│           ├── 39_1.jpg
│           ├── 3_0.jpg
│           ├── 40_1.jpg
│           ├── 41_7.jpg
│           ├── 42_4.jpg
│           ├── 43_2.jpg
│           ├── 44_3.jpg
│           ├── 45_5.jpg
│           ├── 46_1.jpg
│           ├── 47_2.jpg
│           ├── 48_4.jpg
│           ├── 49_4.jpg
│           ├── 4_4.jpg
│           ├── 50_6.jpg
│           ├── 51_3.jpg
│           ├── 52_5.jpg
│           ├── 53_5.jpg
│           ├── 54_6.jpg
│           ├── 55_0.jpg
│           ├── 56_4.jpg
│           ├── 57_1.jpg
│           ├── 58_9.jpg
│           ├── 59_5.jpg
│           ├── 5_1.jpg
│           ├── 60_7.jpg
│           ├── 61_8.jpg
│           ├── 62_9.jpg
│           ├── 63_3.jpg
│           ├── 64_7.jpg
│           ├── 65_4.jpg
│           ├── 66_6.jpg
│           ├── 67_4.jpg
│           ├── 68_3.jpg
│           ├── 69_0.jpg
│           ├── 6_4.jpg
│           ├── 70_7.jpg
│           ├── 71_0.jpg
│           ├── 72_2.jpg
│           ├── 73_9.jpg
│           ├── 74_1.jpg
│           ├── 75_7.jpg
│           ├── 76_3.jpg
│           ├── 77_2.jpg
│           ├── 78_9.jpg
│           ├── 79_7.jpg
│           ├── 7_9.jpg
│           ├── 80_7.jpg
│           ├── 81_6.jpg
│           ├── 82_2.jpg
│           ├── 83_7.jpg
│           ├── 84_8.jpg
│           ├── 85_4.jpg
│           ├── 86_7.jpg
│           ├── 87_3.jpg
│           ├── 88_6.jpg
│           ├── 89_1.jpg
│           ├── 8_5.jpg
│           ├── 90_3.jpg
│           ├── 91_6.jpg
│           ├── 92_9.jpg
│           ├── 93_3.jpg
│           ├── 94_1.jpg
│           ├── 95_4.jpg
│           ├── 96_1.jpg
│           ├── 97_7.jpg
│           ├── 98_6.jpg
│           └── 9_9.jpg
├── lenet5.zip
├── lenet5
│   ├── mnist_lenet5_backward.py
│   ├── mnist_lenet5_forward.py
│   └── mnist_lenet5_test.py
├── num.zip
├── opt.zip
├── opt
│   ├── opt4_1.py
│   ├── opt4_2.py
│   ├── opt4_3.py
│   ├── opt4_4.py
│   ├── opt4_5.py
│   ├── opt4_6.py
│   ├── opt4_7.py
│   ├── opt4_8_backward.py
│   ├── opt4_8_forward.py
│   └── opt4_8_generateds.py
├── pic.zip
├── pic
│   ├── 0.png
│   ├── 1.png
│   ├── 2.png
│   ├── 3.png
│   ├── 4.png
│   ├── 5.png
│   ├── 6.png
│   ├── 7.png
│   ├── 8.png
│   └── 9.png
├── python.zip
├── python
│   ├── a.py
│   ├── animal.py
│   ├── b.py
│   ├── c.py
│   ├── save.dat
│   └── tf3_1.py
├── tf.zip
├── tf
│   ├── tf3_1.py
│   ├── tf3_2.py
│   ├── tf3_3.py
│   ├── tf3_4.py
│   ├── tf3_5.py
│   └── tf3_6.py
├── vgg.zip
└── vgg
    ├── Nclasses.py
    ├── app.py
    ├── pic
    │   ├── a.jpg
    │   ├── b.jpg
    │   ├── c.jpg
    │   └── d.jpg
    ├── utils.py
    └── vgg16.py

/fc1.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc1.zip
--------------------------------------------------------------------------------
/fc1/mnist_backward.py:
--------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os

BATCH_SIZE = 200
LEARNING_RATE_BASE = 0.1
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 50000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH="./model/"
MODEL_NAME="mnist_model"


def backward(mnist):

    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    backward(mnist)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/fc1/mnist_forward.py:
--------------------------------------------------------------------------------
import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight(shape, regularizer):
    w = tf.Variable(tf.truncated_normal(shape,stddev=0.1))
    if regularizer != None: tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w


def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b

def forward(x, regularizer):
    w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)
    b1 = get_bias([LAYER1_NODE])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
    b2 = get_bias([OUTPUT_NODE])
    y = tf.matmul(y1, w2) + b2
    return y
--------------------------------------------------------------------------------
/fc1/mnist_test.py:
--------------------------------------------------------------------------------
#coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
TEST_INTERVAL_SECS = 5

def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
        y = mnist_forward.forward(x, None)

        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                    print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)

def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    test(mnist)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/fc2.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc2.zip
--------------------------------------------------------------------------------
/fc2/mnist_backward.py:
--------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os

BATCH_SIZE = 200
LEARNING_RATE_BASE = 0.1
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 50000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH="./model/"
MODEL_NAME="mnist_model"


def backward(mnist):

    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    backward(mnist)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/fc2/mnist_forward.py:
--------------------------------------------------------------------------------
import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight(shape, regularizer):
    w = tf.Variable(tf.truncated_normal(shape,stddev=0.1))
    if regularizer != None: tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w


def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b

def forward(x, regularizer):
    w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)
    b1 = get_bias([LAYER1_NODE])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
    b2 = get_bias([OUTPUT_NODE])
    y = tf.matmul(y1, w2) + b2
    return y
--------------------------------------------------------------------------------
/fc2/mnist_test.py:
--------------------------------------------------------------------------------
#coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
TEST_INTERVAL_SECS = 5

def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
        y = mnist_forward.forward(x, None)

        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                    print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)

def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    test(mnist)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/fc3.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3.zip
--------------------------------------------------------------------------------
/fc3/mnist_app.py:
--------------------------------------------------------------------------------
#coding:utf-8

import tensorflow as tf
import numpy as np
from PIL import Image
import mnist_backward
import mnist_forward

def restore_model(testPicArr):
    with tf.Graph().as_default() as tg:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y = mnist_forward.forward(x, None)
        preValue = tf.argmax(y, 1)

        variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

                preValue = sess.run(preValue, feed_dict={x:testPicArr})
                return preValue
            else:
                print("No checkpoint file found")
                return -1

def pre_pic(picName):
    img = Image.open(picName)
    reIm = img.resize((28,28), Image.ANTIALIAS)
    im_arr = np.array(reIm.convert('L'))
    threshold = 50
    for i in range(28):
        for j in range(28):
            im_arr[i][j] = 255 - im_arr[i][j]
            if (im_arr[i][j] < threshold):
                im_arr[i][j] = 0
            else: im_arr[i][j] = 255

    nm_arr = im_arr.reshape([1, 784])
    nm_arr = nm_arr.astype(np.float32)
    img_ready = np.multiply(nm_arr, 1.0/255.0)

    return img_ready

def application():
    testNum = input("input the number of test pictures:")
    for i in range(testNum):
        testPic = raw_input("the path of test picture:")
        testPicArr = pre_pic(testPic)
        preValue = restore_model(testPicArr)
        print "The prediction number is:", preValue

def main():
    application()

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/fc3/mnist_backward.py:
--------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os

BATCH_SIZE = 200
LEARNING_RATE_BASE = 0.1
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 50000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH="./model/"
MODEL_NAME="mnist_model"


def backward(mnist):

    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    backward(mnist)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/fc3/mnist_forward.py:
--------------------------------------------------------------------------------
import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight(shape, regularizer):
    w = tf.Variable(tf.truncated_normal(shape,stddev=0.1))
    if regularizer != None: tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w


def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b

def forward(x, regularizer):
    w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)
    b1 = get_bias([LAYER1_NODE])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
    b2 = get_bias([OUTPUT_NODE])
    y = tf.matmul(y1, w2) + b2
    return y
--------------------------------------------------------------------------------
/fc3/mnist_test.py:
--------------------------------------------------------------------------------
#coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
TEST_INTERVAL_SECS = 5

def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
        y = mnist_forward.forward(x, None)

        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                    print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)

def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    test(mnist)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/fc3/pic/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/0.png
--------------------------------------------------------------------------------
/fc3/pic/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/1.png -------------------------------------------------------------------------------- /fc3/pic/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/2.png -------------------------------------------------------------------------------- /fc3/pic/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/3.png -------------------------------------------------------------------------------- /fc3/pic/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/4.png -------------------------------------------------------------------------------- /fc3/pic/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/5.png -------------------------------------------------------------------------------- /fc3/pic/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/6.png -------------------------------------------------------------------------------- /fc3/pic/7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/7.png -------------------------------------------------------------------------------- /fc3/pic/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/8.png -------------------------------------------------------------------------------- /fc3/pic/9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc3/pic/9.png -------------------------------------------------------------------------------- /fc4.z01: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4.z01 -------------------------------------------------------------------------------- /fc4.z02: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4.z02 -------------------------------------------------------------------------------- /fc4.z03: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4.z03 
-------------------------------------------------------------------------------- /fc4.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4.zip -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/10_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/10_0.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/11_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/11_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/12_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/12_9.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/13_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/13_0.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/14_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/14_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/15_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/15_5.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/16_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/16_9.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/17_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/17_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/18_3.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/18_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/19_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/19_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/1_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/1_2.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/20_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/20_9.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/21_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/21_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/22_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/22_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/23_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/23_5.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/24_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/24_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/25_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/25_0.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/26_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/26_7.jpg 
-------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/27_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/27_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/28_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/28_0.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/29_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/29_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/2_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/2_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/30_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/30_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/31_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/31_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/32_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/32_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/33_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/33_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/34_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/34_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/35_2.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/35_2.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/36_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/36_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/37_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/37_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/38_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/38_2.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/39_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/39_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/3_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/3_0.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/40_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/40_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/41_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/41_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/42_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/42_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/43_2.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/43_2.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/44_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/44_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/45_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/45_5.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/46_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/46_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/47_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/47_2.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/48_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/48_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/49_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/49_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/4_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/4_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/50_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/50_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/51_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/51_3.jpg 
-------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/52_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/52_5.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/53_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/53_5.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/54_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/54_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/55_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/55_0.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/56_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/56_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/57_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/57_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/58_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/58_9.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/59_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/59_5.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/5_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/5_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/60_7.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/60_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/61_8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/61_8.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/62_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/62_9.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/63_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/63_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/64_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/64_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/65_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/65_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/66_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/66_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/67_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/67_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/68_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/68_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/69_0.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/69_0.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/6_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/6_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/70_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/70_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/71_0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/71_0.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/72_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/72_2.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/73_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/73_9.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/74_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/74_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/75_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/75_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/76_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/76_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/77_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/77_2.jpg 
-------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/78_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/78_9.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/79_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/79_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/7_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/7_9.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/80_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/80_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/81_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/81_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/82_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/82_2.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/83_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/83_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/84_8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/84_8.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/85_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/85_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/86_7.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/86_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/87_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/87_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/88_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/88_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/89_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/89_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/8_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/8_5.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/90_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/90_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/91_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/91_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/92_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/92_9.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/93_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/93_3.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/94_1.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/94_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/95_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/95_4.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/96_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/96_1.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/97_7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/97_7.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/98_6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/98_6.jpg -------------------------------------------------------------------------------- /fc4/mnist_data_jpg/mnist_test_jpg_10000/9_9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/fc4/mnist_data_jpg/mnist_test_jpg_10000/9_9.jpg -------------------------------------------------------------------------------- /lenet5.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/lenet5.zip -------------------------------------------------------------------------------- /lenet5/mnist_lenet5_backward.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | import tensorflow as tf 3 | from tensorflow.examples.tutorials.mnist import input_data 4 | import mnist_lenet5_forward 5 | import os 6 | import numpy as np 7 | 8 | BATCH_SIZE = 100 9 | LEARNING_RATE_BASE = 0.005 10 | LEARNING_RATE_DECAY = 0.99 11 | REGULARIZER = 0.0001 12 | STEPS = 50000 13 | MOVING_AVERAGE_DECAY = 0.99 14 | MODEL_SAVE_PATH="./model/" 15 | MODEL_NAME="mnist_model" 16 | 17 | def backward(mnist): 18 | x = tf.placeholder(tf.float32,[ 19 | BATCH_SIZE, 20 | mnist_lenet5_forward.IMAGE_SIZE, 21 | mnist_lenet5_forward.IMAGE_SIZE, 22 | mnist_lenet5_forward.NUM_CHANNELS]) 23 | y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) 24 | y = mnist_lenet5_forward.forward(x,True, REGULARIZER) 25 | global_step = tf.Variable(0, trainable=False) 26 | 27 | ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1)) 28 | cem = tf.reduce_mean(ce) 29 | loss = cem + tf.add_n(tf.get_collection('losses')) 30 | 31 | learning_rate = tf.train.exponential_decay( 32 | 
LEARNING_RATE_BASE, 33 | global_step, 34 | mnist.train.num_examples / BATCH_SIZE, 35 | LEARNING_RATE_DECAY, 36 | staircase=True) 37 | 38 | train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step) 39 | 40 | ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) 41 | ema_op = ema.apply(tf.trainable_variables()) 42 | with tf.control_dependencies([train_step, ema_op]): 43 | train_op = tf.no_op(name='train') 44 | 45 | saver = tf.train.Saver() 46 | 47 | with tf.Session() as sess: 48 | init_op = tf.global_variables_initializer() 49 | sess.run(init_op) 50 | 51 | ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH) 52 | if ckpt and ckpt.model_checkpoint_path: 53 | saver.restore(sess, ckpt.model_checkpoint_path) 54 | 55 | for i in range(STEPS): 56 | xs, ys = mnist.train.next_batch(BATCH_SIZE) 57 | reshaped_xs = np.reshape(xs,( 58 | BATCH_SIZE, 59 | mnist_lenet5_forward.IMAGE_SIZE, 60 | mnist_lenet5_forward.IMAGE_SIZE, 61 | mnist_lenet5_forward.NUM_CHANNELS)) 62 | _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys}) 63 | if i % 100 == 0: 64 | print("After %d training step(s), loss on training batch is %g." % (step, loss_value)) 65 | saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step) 66 | 67 | def main(): 68 | mnist = input_data.read_data_sets("./data/", one_hot=True) 69 | backward(mnist) 70 | 71 | if __name__ == '__main__': 72 | main() 73 | 74 | 75 | -------------------------------------------------------------------------------- /lenet5/mnist_lenet5_forward.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | import tensorflow as tf 3 | IMAGE_SIZE = 28 4 | NUM_CHANNELS = 1 5 | CONV1_SIZE = 5 6 | CONV1_KERNEL_NUM = 32 7 | CONV2_SIZE = 5 8 | CONV2_KERNEL_NUM = 64 9 | FC_SIZE = 512 10 | OUTPUT_NODE = 10 11 | 12 | def get_weight(shape, regularizer): 13 | w = tf.Variable(tf.truncated_normal(shape,stddev=0.1)) 14 | if regularizer != None: tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w)) 15 | return w 16 | 17 | def get_bias(shape): 18 | b = tf.Variable(tf.zeros(shape)) 19 | return b 20 | 21 | def conv2d(x,w): 22 | return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') 23 | 24 | def max_pool_2x2(x): 25 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 26 | 27 | def forward(x, train, regularizer): 28 | conv1_w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], regularizer) 29 | conv1_b = get_bias([CONV1_KERNEL_NUM]) 30 | conv1 = conv2d(x, conv1_w) 31 | relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b)) 32 | pool1 = max_pool_2x2(relu1) 33 | 34 | conv2_w = get_weight([CONV2_SIZE, CONV2_SIZE, CONV1_KERNEL_NUM, CONV2_KERNEL_NUM],regularizer) 35 | conv2_b = get_bias([CONV2_KERNEL_NUM]) 36 | conv2 = conv2d(pool1, conv2_w) 37 | relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b)) 38 | pool2 = max_pool_2x2(relu2) 39 | 40 | pool_shape = pool2.get_shape().as_list() 41 | nodes = pool_shape[1] * pool_shape[2] * pool_shape[3] 42 | reshaped = tf.reshape(pool2, [pool_shape[0], nodes]) 43 | 44 | fc1_w = get_weight([nodes, FC_SIZE], regularizer) 45 | fc1_b = get_bias([FC_SIZE]) 46 | fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b) 47 | if train: fc1 = tf.nn.dropout(fc1, 0.5) 48 | 49 | fc2_w = get_weight([FC_SIZE, OUTPUT_NODE], regularizer) 50 | fc2_b = get_bias([OUTPUT_NODE]) 51 | y = tf.matmul(fc1, fc2_w) + fc2_b 52 | return y 53 | 
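Note: a minimal standalone sketch (not one of the original lenet5 files) of the shape arithmetic that mnist_lenet5_forward.py relies on when it flattens pool2. Assuming SAME padding with stride 1 for both 5x5 convolutions and 2x2 max-pooling with stride 2, each conv keeps the spatial size and each pool halves it, so the flattened width fed to fc1 is 7*7*64 = 3136. The helper names below are illustrative only.

import math

IMAGE_SIZE = 28          # constant from mnist_lenet5_forward.py
CONV2_KERNEL_NUM = 64    # constant from mnist_lenet5_forward.py

def conv_same_out(size, stride=1):
    # 'SAME' padding: output spatial size = ceil(input / stride)
    return math.ceil(size / stride)

def pool_same_out(size, stride=2):
    # 2x2 max-pool with stride 2 and 'SAME' padding halves an even size
    return math.ceil(size / stride)

s = conv_same_out(IMAGE_SIZE)    # conv1: 28 -> 28
s = pool_same_out(s)             # pool1: 28 -> 14
s = conv_same_out(s)             # conv2: 14 -> 14
s = pool_same_out(s)             # pool2: 14 -> 7
nodes = s * s * CONV2_KERNEL_NUM
print(nodes)                     # 3136 = pool_shape[1] * pool_shape[2] * pool_shape[3]

This is the same value forward() computes at run time from pool2.get_shape().as_list(), which is why fc1_w can be declared with shape [nodes, FC_SIZE].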
-------------------------------------------------------------------------------- /lenet5/mnist_lenet5_test.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | import time 3 | import tensorflow as tf 4 | from tensorflow.examples.tutorials.mnist import input_data 5 | import mnist_lenet5_forward 6 | import mnist_lenet5_backward 7 | import numpy as np 8 | 9 | TEST_INTERVAL_SECS = 5 10 | 11 | def test(mnist): 12 | with tf.Graph().as_default() as g: 13 | x = tf.placeholder(tf.float32,[ 14 | mnist.test.num_examples, 15 | mnist_lenet5_forward.IMAGE_SIZE, 16 | mnist_lenet5_forward.IMAGE_SIZE, 17 | mnist_lenet5_forward.NUM_CHANNELS]) 18 | y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE]) 19 | y = mnist_lenet5_forward.forward(x,False,None) 20 | 21 | ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY) 22 | ema_restore = ema.variables_to_restore() 23 | saver = tf.train.Saver(ema_restore) 24 | 25 | correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) 26 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 27 | 28 | while True: 29 | with tf.Session() as sess: 30 | ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH) 31 | if ckpt and ckpt.model_checkpoint_path: 32 | saver.restore(sess, ckpt.model_checkpoint_path) 33 | 34 | global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1] 35 | reshaped_x = np.reshape(mnist.test.images,( 36 | mnist.test.num_examples, 37 | mnist_lenet5_forward.IMAGE_SIZE, 38 | mnist_lenet5_forward.IMAGE_SIZE, 39 | mnist_lenet5_forward.NUM_CHANNELS)) 40 | accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:mnist.test.labels}) 41 | print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score)) 42 | else: 43 | print('No checkpoint file found') 44 | return 45 | time.sleep(TEST_INTERVAL_SECS) 46 | 47 | def main(): 48 | mnist = input_data.read_data_sets("./data/", one_hot=True) 49 | test(mnist) 50 | 51 | if __name__ == '__main__': 52 | main() 53 | -------------------------------------------------------------------------------- /num.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/num.zip -------------------------------------------------------------------------------- /opt.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/opt.zip -------------------------------------------------------------------------------- /opt/opt4_1.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #预测多或预测少的影响一样 3 | #0导入模块,生成数据集 4 | import tensorflow as tf 5 | import numpy as np 6 | BATCH_SIZE = 8 7 | SEED = 23455 8 | 9 | rdm = np.random.RandomState(SEED) 10 | X = rdm.rand(32,2) 11 | Y_ = [[x1+x2+(rdm.rand()/10.0-0.05)] for (x1, x2) in X] 12 | 13 | #1定义神经网络的输入、参数和输出,定义前向传播过程。 14 | x = tf.placeholder(tf.float32, shape=(None, 2)) 15 | y_ = tf.placeholder(tf.float32, shape=(None, 1)) 16 | w1= tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1)) 17 | y = tf.matmul(x, w1) 18 | 19 | #2定义损失函数及反向传播方法。 20 | #定义损失函数为MSE,反向传播方法为梯度下降。 21 | loss_mse = tf.reduce_mean(tf.square(y_ - y)) 22 | train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss_mse) 23 | 24 | 
#3生成会话,训练STEPS轮 25 | with tf.Session() as sess: 26 | init_op = tf.global_variables_initializer() 27 | sess.run(init_op) 28 | STEPS = 20000 29 | for i in range(STEPS): 30 | start = (i*BATCH_SIZE) % 32 31 | end = (i*BATCH_SIZE) % 32 + BATCH_SIZE 32 | sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]}) 33 | if i % 500 == 0: 34 | print "After %d training steps, w1 is: " % (i) 35 | print sess.run(w1), "\n" 36 | print "Final w1 is: \n", sess.run(w1) 37 | #在本代码#2中尝试其他反向传播方法,看对收敛速度的影响,把体会写到笔记中 38 | -------------------------------------------------------------------------------- /opt/opt4_2.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #酸奶成本1元, 酸奶利润9元 3 | #预测少了损失大,故不要预测少,故生成的模型会多预测一些 4 | #0导入模块,生成数据集 5 | import tensorflow as tf 6 | import numpy as np 7 | BATCH_SIZE = 8 8 | SEED = 23455 9 | COST = 1 10 | PROFIT = 9 11 | 12 | rdm = np.random.RandomState(SEED) 13 | X = rdm.rand(32,2) 14 | Y = [[x1+x2+(rdm.rand()/10.0-0.05)] for (x1, x2) in X] 15 | 16 | #1定义神经网络的输入、参数和输出,定义前向传播过程。 17 | x = tf.placeholder(tf.float32, shape=(None, 2)) 18 | y_ = tf.placeholder(tf.float32, shape=(None, 1)) 19 | w1= tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1)) 20 | y = tf.matmul(x, w1) 21 | 22 | #2定义损失函数及反向传播方法。 23 | # 定义损失函数使得预测少了的损失大,于是模型应该偏向多的方向预测。 24 | loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_)*COST, (y_ - y)*PROFIT)) 25 | train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss) 26 | 27 | #3生成会话,训练STEPS轮。 28 | with tf.Session() as sess: 29 | init_op = tf.global_variables_initializer() 30 | sess.run(init_op) 31 | STEPS = 3000 32 | for i in range(STEPS): 33 | start = (i*BATCH_SIZE) % 32 34 | end = (i*BATCH_SIZE) % 32 + BATCH_SIZE 35 | sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]}) 36 | if i % 500 == 0: 37 | print "After %d training steps, w1 is: " % (i) 38 | print sess.run(w1), "\n" 39 | print "Final w1 is: \n", sess.run(w1) 40 | -------------------------------------------------------------------------------- /opt/opt4_3.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #酸奶成本9元, 酸奶利润1元 3 | #预测多了损失大,故不要预测多,故生成的模型会少预测一些 4 | #0导入模块,生成数据集 5 | import tensorflow as tf 6 | import numpy as np 7 | BATCH_SIZE = 8 8 | SEED = 23455 9 | COST = 9 10 | PROFIT = 1 11 | 12 | rdm = np.random.RandomState(SEED) 13 | X = rdm.rand(32,2) 14 | Y = [[x1+x2+(rdm.rand()/10.0-0.05)] for (x1, x2) in X] 15 | 16 | #1定义神经网络的输入、参数和输出,定义前向传播过程。 17 | x = tf.placeholder(tf.float32, shape=(None, 2)) 18 | y_ = tf.placeholder(tf.float32, shape=(None, 1)) 19 | w1= tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1)) 20 | y = tf.matmul(x, w1) 21 | 22 | #2定义损失函数及反向传播方法。 23 | #重新定义损失函数,使得预测多了的损失大,于是模型应该偏向少的方向预测。 24 | loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_)*COST, (y_ - y)*PROFIT)) 25 | train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss) 26 | 27 | #3生成会话,训练STEPS轮。 28 | with tf.Session() as sess: 29 | init_op = tf.global_variables_initializer() 30 | sess.run(init_op) 31 | STEPS = 3000 32 | for i in range(STEPS): 33 | start = (i*BATCH_SIZE) % 32 34 | end = (i*BATCH_SIZE) % 32 + BATCH_SIZE 35 | sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]}) 36 | if i % 500 == 0: 37 | print "After %d training steps, w1 is: " % (i) 38 | print sess.run(w1), "\n" 39 | print "Final w1 is: \n", sess.run(w1) 40 | -------------------------------------------------------------------------------- /opt/opt4_4.py: 
-------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #设损失函数 loss=(w+1)^2, 令w初值是常数5。反向传播就是求最优w,即求最小loss对应的w值 3 | import tensorflow as tf 4 | #定义待优化参数w初值赋5 5 | w = tf.Variable(tf.constant(5, dtype=tf.float32)) 6 | #定义损失函数loss 7 | loss = tf.square(w+1) 8 | #定义反向传播方法 9 | train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss) 10 | #生成会话,训练40轮 11 | with tf.Session() as sess: 12 | init_op=tf.global_variables_initializer() 13 | sess.run(init_op) 14 | for i in range(40): 15 | sess.run(train_step) 16 | w_val = sess.run(w) 17 | loss_val = sess.run(loss) 18 | print "After %s steps: w is %f, loss is %f." % (i, w_val,loss_val) 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /opt/opt4_5.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #设损失函数 loss=(w+1)^2, 令w初值是常数10。反向传播就是求最优w,即求最小loss对应的w值 3 | #使用指数衰减的学习率,在迭代初期得到较高的下降速度,可以在较小的训练轮数下取得更有收敛度。 4 | import tensorflow as tf 5 | 6 | LEARNING_RATE_BASE = 0.1 #最初学习率 7 | LEARNING_RATE_DECAY = 0.99 #学习率衰减率 8 | LEARNING_RATE_STEP = 1 #喂入多少轮BATCH_SIZE后,更新一次学习率,一般设为:总样本数/BATCH_SIZE 9 | 10 | #运行了几轮BATCH_SIZE的计数器,初值给0, 设为不被训练 11 | global_step = tf.Variable(0, trainable=False) 12 | #定义指数下降学习率 13 | learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, LEARNING_RATE_STEP, LEARNING_RATE_DECAY, staircase=True) 14 | #定义待优化参数,初值给10 15 | w = tf.Variable(tf.constant(5, dtype=tf.float32)) 16 | #定义损失函数loss 17 | loss = tf.square(w+1) 18 | #定义反向传播方法 19 | train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step) 20 | #生成会话,训练40轮 21 | with tf.Session() as sess: 22 | init_op=tf.global_variables_initializer() 23 | sess.run(init_op) 24 | for i in range(40): 25 | sess.run(train_step) 26 | learning_rate_val = sess.run(learning_rate) 27 | global_step_val = sess.run(global_step) 28 | w_val = sess.run(w) 29 | loss_val = sess.run(loss) 30 | print "After %s steps: global_step is %f, w is %f, learning rate is %f, loss is %f" % (i, global_step_val, w_val, learning_rate_val, loss_val) 31 | -------------------------------------------------------------------------------- /opt/opt4_6.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | import tensorflow as tf 3 | 4 | #1. 定义变量及滑动平均类 5 | #定义一个32位浮点变量,初始值为0.0 这个代码就是不断更新w1参数,优化w1参数,滑动平均做了个w1的影子 6 | w1 = tf.Variable(0, dtype=tf.float32) 7 | #定义num_updates(NN的迭代轮数),初始值为0,不可被优化(训练),这个参数不训练 8 | global_step = tf.Variable(0, trainable=False) 9 | #实例化滑动平均类,给衰减率为0.99,当前轮数global_step 10 | MOVING_AVERAGE_DECAY = 0.99 11 | ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) 12 | #ema.apply后的括号里是更新列表,每次运行sess.run(ema_op)时,对更新列表中的元素求滑动平均值。 13 | #在实际应用中会使用tf.trainable_variables()自动将所有待训练的参数汇总为列表 14 | #ema_op = ema.apply([w1]) 15 | ema_op = ema.apply(tf.trainable_variables()) 16 | 17 | #2. 
查看不同迭代中变量取值的变化。 18 | with tf.Session() as sess: 19 | # 初始化 20 | init_op = tf.global_variables_initializer() 21 | sess.run(init_op) 22 | #用ema.average(w1)获取w1滑动平均值 (要运行多个节点,作为列表中的元素列出,写在sess.run中) 23 | #打印出当前参数w1和w1滑动平均值 24 | print "current global_step:", sess.run(global_step) 25 | print "current w1", sess.run([w1, ema.average(w1)]) 26 | 27 | # 参数w1的值赋为1 28 | sess.run(tf.assign(w1, 1)) 29 | sess.run(ema_op) 30 | print "current global_step:", sess.run(global_step) 31 | print "current w1", sess.run([w1, ema.average(w1)]) 32 | 33 | # 更新global_step和w1的值,模拟出轮数为100时,参数w1变为10, 以下代码global_step保持为100,每次执行滑动平均操作,影子值会更新 34 | sess.run(tf.assign(global_step, 100)) 35 | sess.run(tf.assign(w1, 10)) 36 | sess.run(ema_op) 37 | print "current global_step:", sess.run(global_step) 38 | print "current w1:", sess.run([w1, ema.average(w1)]) 39 | 40 | # 每次sess.run会更新一次w1的滑动平均值 41 | sess.run(ema_op) 42 | print "current global_step:" , sess.run(global_step) 43 | print "current w1:", sess.run([w1, ema.average(w1)]) 44 | 45 | sess.run(ema_op) 46 | print "current global_step:" , sess.run(global_step) 47 | print "current w1:", sess.run([w1, ema.average(w1)]) 48 | 49 | sess.run(ema_op) 50 | print "current global_step:" , sess.run(global_step) 51 | print "current w1:", sess.run([w1, ema.average(w1)]) 52 | 53 | sess.run(ema_op) 54 | print "current global_step:" , sess.run(global_step) 55 | print "current w1:", sess.run([w1, ema.average(w1)]) 56 | 57 | sess.run(ema_op) 58 | print "current global_step:" , sess.run(global_step) 59 | print "current w1:", sess.run([w1, ema.average(w1)]) 60 | 61 | sess.run(ema_op) 62 | print "current global_step:" , sess.run(global_step) 63 | print "current w1:", sess.run([w1, ema.average(w1)]) 64 | 65 | #更改MOVING_AVERAGE_DECAY 为 0.1 看影子追随速度 66 | 67 | """ 68 | 69 | current global_step: 0 70 | current w1 [0.0, 0.0] 71 | current global_step: 0 72 | current w1 [1.0, 0.9] 73 | current global_step: 100 74 | current w1: [10.0, 1.6445453] 75 | current global_step: 100 76 | current w1: [10.0, 2.3281732] 77 | current global_step: 100 78 | current w1: [10.0, 2.955868] 79 | current global_step: 100 80 | current w1: [10.0, 3.532206] 81 | current global_step: 100 82 | current w1: [10.0, 4.061389] 83 | current global_step: 100 84 | current w1: [10.0, 4.547275] 85 | current global_step: 100 86 | current w1: [10.0, 4.9934072] 87 | 88 | """ 89 | -------------------------------------------------------------------------------- /opt/opt4_7.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #0导入模块 ,生成模拟数据集 3 | import tensorflow as tf 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | BATCH_SIZE = 30 7 | seed = 2 8 | #基于seed产生随机数 9 | rdm = np.random.RandomState(seed) 10 | #随机数返回300行2列的矩阵,表示300组坐标点(x0,x1)作为输入数据集 11 | X = rdm.randn(300,2) 12 | #从X这个300行2列的矩阵中取出一行,判断如果两个坐标的平方和小于2,给Y赋值1,其余赋值0 13 | #作为输入数据集的标签(正确答案) 14 | Y_ = [int(x0*x0 + x1*x1 <2) for (x0,x1) in X] 15 | #遍历Y中的每个元素,1赋值'red'其余赋值'blue',这样可视化显示时人可以直观区分 16 | Y_c = [['red' if y else 'blue'] for y in Y_] 17 | #对数据集X和标签Y进行shape整理,第一个元素为-1表示,随第二个参数计算得到,第二个元素表示多少列,把X整理为n行2列,把Y整理为n行1列 18 | X = np.vstack(X).reshape(-1,2) 19 | Y_ = np.vstack(Y_).reshape(-1,1) 20 | print X 21 | print Y_ 22 | print Y_c 23 | #用plt.scatter画出数据集X各行中第0列元素和第1列元素的点即各行的(x0,x1),用各行Y_c对应的值表示颜色(c是color的缩写) 24 | plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c)) 25 | plt.show() 26 | 27 | 28 | #定义神经网络的输入、参数和输出,定义前向传播过程 29 | def get_weight(shape, regularizer): 30 | w = tf.Variable(tf.random_normal(shape), dtype=tf.float32) 31 | 
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w)) 32 | return w 33 | 34 | def get_bias(shape): 35 | b = tf.Variable(tf.constant(0.01, shape=shape)) 36 | return b 37 | 38 | x = tf.placeholder(tf.float32, shape=(None, 2)) 39 | y_ = tf.placeholder(tf.float32, shape=(None, 1)) 40 | 41 | w1 = get_weight([2,11], 0.01) 42 | b1 = get_bias([11]) 43 | y1 = tf.nn.relu(tf.matmul(x, w1)+b1) 44 | 45 | w2 = get_weight([11,1], 0.01) 46 | b2 = get_bias([1]) 47 | y = tf.matmul(y1, w2)+b2 48 | 49 | 50 | #定义损失函数 51 | loss_mse = tf.reduce_mean(tf.square(y-y_)) 52 | loss_total = loss_mse + tf.add_n(tf.get_collection('losses')) 53 | 54 | 55 | #定义反向传播方法:不含正则化 56 | train_step = tf.train.AdamOptimizer(0.0001).minimize(loss_mse) 57 | 58 | with tf.Session() as sess: 59 | init_op = tf.global_variables_initializer() 60 | sess.run(init_op) 61 | STEPS = 40000 62 | for i in range(STEPS): 63 | start = (i*BATCH_SIZE) % 300 64 | end = start + BATCH_SIZE 65 | sess.run(train_step, feed_dict={x:X[start:end], y_:Y_[start:end]}) 66 | if i % 2000 == 0: 67 | loss_mse_v = sess.run(loss_mse, feed_dict={x:X, y_:Y_}) 68 | print("After %d steps, loss is: %f" %(i, loss_mse_v)) 69 | #xx在-3到3之间以步长为0.01,yy在-3到3之间以步长0.01,生成二维网格坐标点 70 | xx, yy = np.mgrid[-3:3:.01, -3:3:.01] 71 | #将xx , yy拉直,并合并成一个2列的矩阵,得到一个网格坐标点的集合 72 | grid = np.c_[xx.ravel(), yy.ravel()] 73 | #将网格坐标点喂入神经网络 ,probs为输出 74 | probs = sess.run(y, feed_dict={x:grid}) 75 | #probs的shape调整成xx的样子 76 | probs = probs.reshape(xx.shape) 77 | print "w1:\n",sess.run(w1) 78 | print "b1:\n",sess.run(b1) 79 | print "w2:\n",sess.run(w2) 80 | print "b2:\n",sess.run(b2) 81 | 82 | plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c)) 83 | plt.contour(xx, yy, probs, levels=[.5]) 84 | plt.show() 85 | 86 | 87 | 88 | #定义反向传播方法:包含正则化 89 | train_step = tf.train.AdamOptimizer(0.0001).minimize(loss_total) 90 | 91 | with tf.Session() as sess: 92 | init_op = tf.global_variables_initializer() 93 | sess.run(init_op) 94 | STEPS = 40000 95 | for i in range(STEPS): 96 | start = (i*BATCH_SIZE) % 300 97 | end = start + BATCH_SIZE 98 | sess.run(train_step, feed_dict={x: X[start:end], y_:Y_[start:end]}) 99 | if i % 2000 == 0: 100 | loss_v = sess.run(loss_total, feed_dict={x:X,y_:Y_}) 101 | print("After %d steps, loss is: %f" %(i, loss_v)) 102 | 103 | xx, yy = np.mgrid[-3:3:.01, -3:3:.01] 104 | grid = np.c_[xx.ravel(), yy.ravel()] 105 | probs = sess.run(y, feed_dict={x:grid}) 106 | probs = probs.reshape(xx.shape) 107 | print "w1:\n",sess.run(w1) 108 | print "b1:\n",sess.run(b1) 109 | print "w2:\n",sess.run(w2) 110 | print "b2:\n",sess.run(b2) 111 | 112 | plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c)) 113 | plt.contour(xx, yy, probs, levels=[.5]) 114 | plt.show() 115 | 116 | -------------------------------------------------------------------------------- /opt/opt4_8_backward.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #0导入模块 ,生成模拟数据集 3 | import tensorflow as tf 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | import opt4_8_generateds 7 | import opt4_8_forward 8 | 9 | STEPS = 40000 10 | BATCH_SIZE = 30 11 | LEARNING_RATE_BASE = 0.001 12 | LEARNING_RATE_DECAY = 0.999 13 | REGULARIZER = 0.01 14 | 15 | def backward(): 16 | x = tf.placeholder(tf.float32, shape=(None, 2)) 17 | y_ = tf.placeholder(tf.float32, shape=(None, 1)) 18 | 19 | X, Y_, Y_c = opt4_8_generateds.generateds() 20 | 21 | y = opt4_8_forward.forward(x, REGULARIZER) 22 | 23 | global_step = tf.Variable(0,trainable=False) 24 | 25 | learning_rate = 
tf.train.exponential_decay( 26 | LEARNING_RATE_BASE, 27 | global_step, 28 | 300/BATCH_SIZE, 29 | LEARNING_RATE_DECAY, 30 | staircase=True) 31 | 32 | 33 | #定义损失函数 34 | loss_mse = tf.reduce_mean(tf.square(y-y_)) 35 | loss_total = loss_mse + tf.add_n(tf.get_collection('losses')) 36 | 37 | #定义反向传播方法:包含正则化 38 | train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_total) 39 | 40 | with tf.Session() as sess: 41 | init_op = tf.global_variables_initializer() 42 | sess.run(init_op) 43 | for i in range(STEPS): 44 | start = (i*BATCH_SIZE) % 300 45 | end = start + BATCH_SIZE 46 | sess.run(train_step, feed_dict={x: X[start:end], y_:Y_[start:end]}) 47 | if i % 2000 == 0: 48 | loss_v = sess.run(loss_total, feed_dict={x:X,y_:Y_}) 49 | print("After %d steps, loss is: %f" %(i, loss_v)) 50 | 51 | xx, yy = np.mgrid[-3:3:.01, -3:3:.01] 52 | grid = np.c_[xx.ravel(), yy.ravel()] 53 | probs = sess.run(y, feed_dict={x:grid}) 54 | probs = probs.reshape(xx.shape) 55 | 56 | plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c)) 57 | plt.contour(xx, yy, probs, levels=[.5]) 58 | plt.show() 59 | 60 | if __name__=='__main__': 61 | backward() 62 | -------------------------------------------------------------------------------- /opt/opt4_8_forward.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #0导入模块 ,生成模拟数据集 3 | import tensorflow as tf 4 | 5 | #定义神经网络的输入、参数和输出,定义前向传播过程 6 | def get_weight(shape, regularizer): 7 | w = tf.Variable(tf.random_normal(shape), dtype=tf.float32) 8 | tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w)) 9 | return w 10 | 11 | def get_bias(shape): 12 | b = tf.Variable(tf.constant(0.01, shape=shape)) 13 | return b 14 | 15 | def forward(x, regularizer): 16 | 17 | w1 = get_weight([2,11], regularizer) 18 | b1 = get_bias([11]) 19 | y1 = tf.nn.relu(tf.matmul(x, w1) + b1) 20 | 21 | w2 = get_weight([11,1], regularizer) 22 | b2 = get_bias([1]) 23 | y = tf.matmul(y1, w2) + b2 24 | 25 | return y 26 | -------------------------------------------------------------------------------- /opt/opt4_8_generateds.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #0导入模块 ,生成模拟数据集 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | seed = 2 6 | def generateds(): 7 | #基于seed产生随机数 8 | rdm = np.random.RandomState(seed) 9 | #随机数返回300行2列的矩阵,表示300组坐标点(x0,x1)作为输入数据集 10 | X = rdm.randn(300,2) 11 | #从X这个300行2列的矩阵中取出一行,判断如果两个坐标的平方和小于2,给Y赋值1,其余赋值0 12 | #作为输入数据集的标签(正确答案) 13 | Y_ = [int(x0*x0 + x1*x1 <2) for (x0,x1) in X] 14 | #遍历Y中的每个元素,1赋值'red'其余赋值'blue',这样可视化显示时人可以直观区分 15 | Y_c = [['red' if y else 'blue'] for y in Y_] 16 | #对数据集X和标签Y进行形状整理,第一个元素为-1表示跟随第二列计算,第二个元素表示多少列,可见X为两列,Y为1列 17 | X = np.vstack(X).reshape(-1,2) 18 | Y_ = np.vstack(Y_).reshape(-1,1) 19 | 20 | return X, Y_, Y_c 21 | 22 | #print X 23 | #print Y_ 24 | #print Y_c 25 | #用plt.scatter画出数据集X各行中第0列元素和第1列元素的点即各行的(x0,x1),用各行Y_c对应的值表示颜色(c是color的缩写) 26 | #plt.scatter(X[:,0], X[:,1], c=np.squeeze(Y_c)) 27 | #plt.show() 28 | -------------------------------------------------------------------------------- /pic.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic.zip -------------------------------------------------------------------------------- /pic/0.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/0.png -------------------------------------------------------------------------------- /pic/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/1.png -------------------------------------------------------------------------------- /pic/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/2.png -------------------------------------------------------------------------------- /pic/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/3.png -------------------------------------------------------------------------------- /pic/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/4.png -------------------------------------------------------------------------------- /pic/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/5.png -------------------------------------------------------------------------------- /pic/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/6.png -------------------------------------------------------------------------------- /pic/7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/7.png -------------------------------------------------------------------------------- /pic/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/8.png -------------------------------------------------------------------------------- /pic/9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/pic/9.png -------------------------------------------------------------------------------- /python.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/python.zip -------------------------------------------------------------------------------- /python/a.py: -------------------------------------------------------------------------------- 1 | 123 2 | -------------------------------------------------------------------------------- /python/animal.py: -------------------------------------------------------------------------------- 1 | class Animals(): 2 | def breathe(self): 3 | print " breathing" 4 | def move(self): 5 | print "moving" 6 | 
def eat (self): 7 | print "eating food" 8 | class Mammals(Animals): 9 | def breastfeed(self): 10 | print "feeding young" 11 | class Cats(Mammals): 12 | def __init__(self, spots): 13 | self.spots = spots 14 | def catch_mouse(self): 15 | print "catch mouse" 16 | def left_foot_forward(self): 17 | print "left foot forward" 18 | def left_foot_backward(self): 19 | print "left foot backward" 20 | def dance(self): 21 | self.left_foot_forward() 22 | self.left_foot_backward() 23 | self.left_foot_forward() 24 | self.left_foot_backward() 25 | kitty=Cats(10) 26 | print kitty.spots 27 | kitty.dance() 28 | kitty.breastfeed() 29 | kitty.move() 30 | -------------------------------------------------------------------------------- /python/b.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | age=input("输入你的年龄\n") 3 | if age>18: 4 | print "大于十八岁" 5 | print "你成年了" 6 | else: 7 | print "小于等于十八岁" 8 | print "还未成年" 9 | 10 | -------------------------------------------------------------------------------- /python/c.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | num=input("please input your class number:") 3 | if num==1 or num==2: 4 | print "class room 302" 5 | elif num==3: 6 | print "class room 303" 7 | elif num==4: 8 | print "class room 304" 9 | else: 10 | print "class room 305" 11 | -------------------------------------------------------------------------------- /python/save.dat: -------------------------------------------------------------------------------- 1 | (dp0 2 | S'pocket' 3 | p1 4 | (lp2 5 | S'key' 6 | p3 7 | aS'knife' 8 | p4 9 | asS'position' 10 | p5 11 | S'N2 E3' 12 | p6 13 | sS'money' 14 | p7 15 | I160 16 | s. -------------------------------------------------------------------------------- /python/tf3_1.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | a=tf.constant([1.0,2.0]) 3 | b=tf.constant([3.0,4.0]) 4 | result=a+b 5 | print result 6 | -------------------------------------------------------------------------------- /tf.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/tf.zip -------------------------------------------------------------------------------- /tf/tf3_1.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | a=tf.constant([1.0,2.0]) 3 | b=tf.constant([3.0,4.0]) 4 | result=a+b 5 | print result 6 | -------------------------------------------------------------------------------- /tf/tf3_2.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | x = tf.constant([[1.0, 2.0]]) 3 | w = tf.constant([[3.0], [4.0]]) 4 | y=tf.matmul(x,w) 5 | print y 6 | with tf.Session() as sess: 7 | print sess.run(y) 8 | 9 | 10 | -------------------------------------------------------------------------------- /tf/tf3_3.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #两层简单神经网络(全连接) 3 | import tensorflow as tf 4 | 5 | #定义输入和参数 6 | x = tf.constant([[0.7, 0.5]]) 7 | w1= tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1)) 8 | w2= tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1)) 9 | 10 | #定义前向传播过程 11 | a = tf.matmul(x, w1) 12 | y = tf.matmul(a, w2) 13 | 14 | #用会话计算结果 15 | with tf.Session() as sess: 16 | 
init_op = tf.global_variables_initializer() 17 | sess.run(init_op) 18 | print"y in tf3_3.py is:\n",sess.run(y) 19 | 20 | ''' 21 | y in tf3_3.py is : 22 | [[3.0904665]] 23 | ''' 24 | 25 | -------------------------------------------------------------------------------- /tf/tf3_4.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #两层简单神经网络(全连接) 3 | 4 | import tensorflow as tf 5 | 6 | #定义输入和参数 7 | #用placeholder实现输入定义 (sess.run中喂一组数据) 8 | x = tf.placeholder(tf.float32, shape=(1, 2)) 9 | w1= tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1)) 10 | w2= tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1)) 11 | 12 | 13 | #定义前向传播过程 14 | a = tf.matmul(x, w1) 15 | y = tf.matmul(a, w2) 16 | 17 | 18 | #用会话计算结果 19 | with tf.Session() as sess: 20 | init_op = tf.global_variables_initializer() 21 | sess.run(init_op) 22 | print"y in tf3_4.py is:\n",sess.run(y, feed_dict={x: [[0.7,0.5]]}) 23 | 24 | ''' 25 | y in tf3_4.py is: 26 | [[3.0904665]] 27 | ''' 28 | 29 | -------------------------------------------------------------------------------- /tf/tf3_5.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #两层简单神经网络(全连接) 3 | 4 | import tensorflow as tf 5 | 6 | #定义输入和参数 7 | #用placeholder定义输入(sess.run喂多组数据) 8 | x = tf.placeholder(tf.float32, shape=(None, 2)) 9 | w1= tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1)) 10 | w2= tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1)) 11 | 12 | #定义前向传播过程 13 | a = tf.matmul(x, w1) 14 | y = tf.matmul(a, w2) 15 | 16 | #调用会话计算结果 17 | with tf.Session() as sess: 18 | init_op = tf.global_variables_initializer() 19 | sess.run(init_op) 20 | print "the result of tf3_5.py is:\n",sess.run(y, feed_dict={x: [[0.7,0.5],[0.2,0.3],[0.3,0.4],[0.4,0.5]]}) 21 | print "w1:\n", sess.run(w1) 22 | print "w2:\n", sess.run(w2) 23 | 24 | ''' 25 | the result of tf3_5.py is: 26 | [[ 3.0904665 ] 27 | [ 1.2236414 ] 28 | [ 1.72707319] 29 | [ 2.23050475]] 30 | w1: 31 | [[-0.81131822 1.48459876 0.06532937] 32 | [-2.4427042 0.0992484 0.59122431]] 33 | w2: 34 | [[-0.81131822] 35 | [ 1.48459876] 36 | [ 0.06532937]] 37 | 38 | ''' 39 | 40 | -------------------------------------------------------------------------------- /tf/tf3_6.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | #0导入模块,生成模拟数据集。 3 | import tensorflow as tf 4 | import numpy as np 5 | BATCH_SIZE = 8 6 | SEED = 23455 7 | 8 | #基于seed产生随机数 9 | rdm = np.random.RandomState(SEED) 10 | #随机数返回32行2列的矩阵 表示32组 体积和重量 作为输入数据集 11 | X = rdm.rand(32,2) 12 | #从X这个32行2列的矩阵中 取出一行 判断如果和小于1 给Y赋值1 如果和不小于1 给Y赋值0 13 | #作为输入数据集的标签(正确答案) 14 | Y_ = [[int(x0 + x1 < 1)] for (x0, x1) in X] 15 | print "X:\n",X 16 | print "Y_:\n",Y_ 17 | 18 | #1定义神经网络的输入、参数和输出,定义前向传播过程。 19 | x = tf.placeholder(tf.float32, shape=(None, 2)) 20 | y_= tf.placeholder(tf.float32, shape=(None, 1)) 21 | 22 | w1= tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1)) 23 | w2= tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1)) 24 | 25 | a = tf.matmul(x, w1) 26 | y = tf.matmul(a, w2) 27 | 28 | #2定义损失函数及反向传播方法。 29 | loss_mse = tf.reduce_mean(tf.square(y-y_)) 30 | train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss_mse) 31 | #train_step = tf.train.MomentumOptimizer(0.001,0.9).minimize(loss_mse) 32 | #train_step = tf.train.AdamOptimizer(0.001).minimize(loss_mse) 33 | 34 | #3生成会话,训练STEPS轮 35 | with tf.Session() as sess: 36 | init_op = tf.global_variables_initializer() 37 | sess.run(init_op) 38 | # 
输出目前(未经训练)的参数取值。 39 | print "w1:\n", sess.run(w1) 40 | print "w2:\n", sess.run(w2) 41 | print "\n" 42 | 43 | # 训练模型。 44 | STEPS = 3000 45 | for i in range(STEPS): 46 | start = (i*BATCH_SIZE) % 32 47 | end = start + BATCH_SIZE 48 | sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]}) 49 | if i % 500 == 0: 50 | total_loss = sess.run(loss_mse, feed_dict={x: X, y_: Y_}) 51 | print("After %d training step(s), loss_mse on all data is %g" % (i, total_loss)) 52 | 53 | # 输出训练后的参数取值。 54 | print "\n" 55 | print "w1:\n", sess.run(w1) 56 | print "w2:\n", sess.run(w2) 57 | 58 | """ 59 | X: 60 | [[ 0.83494319 0.11482951] 61 | [ 0.66899751 0.46594987] 62 | [ 0.60181666 0.58838408] 63 | [ 0.31836656 0.20502072] 64 | [ 0.87043944 0.02679395] 65 | [ 0.41539811 0.43938369] 66 | [ 0.68635684 0.24833404] 67 | [ 0.97315228 0.68541849] 68 | [ 0.03081617 0.89479913] 69 | [ 0.24665715 0.28584862] 70 | [ 0.31375667 0.47718349] 71 | [ 0.56689254 0.77079148] 72 | [ 0.7321604 0.35828963] 73 | [ 0.15724842 0.94294584] 74 | [ 0.34933722 0.84634483] 75 | [ 0.50304053 0.81299619] 76 | [ 0.23869886 0.9895604 ] 77 | [ 0.4636501 0.32531094] 78 | [ 0.36510487 0.97365522] 79 | [ 0.73350238 0.83833013] 80 | [ 0.61810158 0.12580353] 81 | [ 0.59274817 0.18779828] 82 | [ 0.87150299 0.34679501] 83 | [ 0.25883219 0.50002932] 84 | [ 0.75690948 0.83429824] 85 | [ 0.29316649 0.05646578] 86 | [ 0.10409134 0.88235166] 87 | [ 0.06727785 0.57784761] 88 | [ 0.38492705 0.48384792] 89 | [ 0.69234428 0.19687348] 90 | [ 0.42783492 0.73416985] 91 | [ 0.09696069 0.04883936]] 92 | Y_: 93 | [[1], [0], [0], [1], [1], [1], [1], [0], [1], [1], [1], [0], [0], [0], [0], [0], [0], [1], [0], [0], [1], [1], [0], [1], [0], [1], [1], [1], [1], [1], [0], [1]] 94 | w1: 95 | [[-0.81131822 1.48459876 0.06532937] 96 | [-2.4427042 0.0992484 0.59122431]] 97 | w2: 98 | [[-0.81131822] 99 | [ 1.48459876] 100 | [ 0.06532937]] 101 | 102 | 103 | After 0 training step(s), loss_mse on all data is 5.13118 104 | After 500 training step(s), loss_mse on all data is 0.429111 105 | After 1000 training step(s), loss_mse on all data is 0.409789 106 | After 1500 training step(s), loss_mse on all data is 0.399923 107 | After 2000 training step(s), loss_mse on all data is 0.394146 108 | After 2500 training step(s), loss_mse on all data is 0.390597 109 | 110 | 111 | w1: 112 | [[-0.70006633 0.9136318 0.08953571] 113 | [-2.3402493 -0.14641267 0.58823055]] 114 | w2: 115 | [[-0.06024267] 116 | [ 0.91956186] 117 | [-0.0682071 ]] 118 | """ 119 | 120 | -------------------------------------------------------------------------------- /vgg.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/vgg.zip -------------------------------------------------------------------------------- /vgg/Nclasses.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding:utf-8 3 | # 每个图像的真实标签,以及对应的索引值 4 | labels = { 5 | 0: 'tench\n Tinca tinca', 6 | 1: 'goldfish\n Carassius auratus', 7 | 2: 'great white shark\n white shark\n man-eater\n man-eating shark\n Carcharodon carcharias', 8 | 3: 'tiger shark\n Galeocerdo cuvieri', 9 | 4: 'hammerhead\n hammerhead shark', 10 | 5: 'electric ray\n crampfish\n numbfish\n torpedo', 11 | 6: 'stingray', 12 | 7: 'cock', 13 | 8: 'hen', 14 | 9: 'ostrich\n Struthio camelus', 15 | 10: 'brambling\n Fringilla montifringilla', 16 | 11: 'goldfinch\n Carduelis carduelis', 17 | 
12: 'house finch\n linnet\n Carpodacus mexicanus', 18 | 13: 'junco\n snowbird', 19 | 14: 'indigo bunting\n indigo finch\n indigo bird\n Passerina cyanea', 20 | 15: 'robin\n American robin\n Turdus migratorius', 21 | 16: 'bulbul', 22 | 17: 'jay', 23 | 18: 'magpie', 24 | 19: 'chickadee', 25 | 20: 'water ouzel\n dipper', 26 | 21: 'kite', 27 | 22: 'bald eagle\n American eagle\n Haliaeetus leucocephalus', 28 | 23: 'vulture', 29 | 24: 'great grey owl\n great gray owl\n Strix nebulosa', 30 | 25: 'European fire salamander\n Salamandra salamandra', 31 | 26: 'common newt\n Triturus vulgaris', 32 | 27: 'eft', 33 | 28: 'spotted salamander\n Ambystoma maculatum', 34 | 29: 'axolotl\n mud puppy\n Ambystoma mexicanum', 35 | 30: 'bullfrog\n Rana catesbeiana', 36 | 31: 'tree frog\n tree-frog', 37 | 32: 'tailed frog\n bell toad\n ribbed toad\n tailed toad\n Ascaphus trui', 38 | 33: 'loggerhead\n loggerhead turtle\n Caretta caretta', 39 | 34: 'leatherback turtle\n leatherback\n leathery turtle\n Dermochelys coriacea', 40 | 35: 'mud turtle', 41 | 36: 'terrapin', 42 | 37: 'box turtle\n box tortoise', 43 | 38: 'banded gecko', 44 | 39: 'common iguana\n iguana\n Iguana iguana', 45 | 40: 'American chameleon\n anole\n Anolis carolinensis', 46 | 41: 'whiptail\n whiptail lizard', 47 | 42: 'agama', 48 | 43: 'frilled lizard\n Chlamydosaurus kingi', 49 | 44: 'alligator lizard', 50 | 45: 'Gila monster\n Heloderma suspectum', 51 | 46: 'green lizard\n Lacerta viridis', 52 | 47: 'African chameleon\n Chamaeleo chamaeleon', 53 | 48: 'Komodo dragon\n Komodo lizard\n dragon lizard\n giant lizard\n Varanus komodoensis', 54 | 49: 'African crocodile\n Nile crocodile\n Crocodylus niloticus', 55 | 50: 'American alligator\n Alligator mississipiensis', 56 | 51: 'triceratops', 57 | 52: 'thunder snake\n worm snake\n Carphophis amoenus', 58 | 53: 'ringneck snake\n ring-necked snake\n ring snake', 59 | 54: 'hognose snake\n puff adder\n sand viper', 60 | 55: 'green snake\n grass snake', 61 | 56: 'king snake\n kingsnake', 62 | 57: 'garter snake\n grass snake', 63 | 58: 'water snake', 64 | 59: 'vine snake', 65 | 60: 'night snake\n Hypsiglena torquata', 66 | 61: 'boa constrictor\n Constrictor constrictor', 67 | 62: 'rock python\n rock snake\n Python sebae', 68 | 63: 'Indian cobra\n Naja naja', 69 | 64: 'green mamba', 70 | 65: 'sea snake', 71 | 66: 'horned viper\n cerastes\n sand viper\n horned asp\n Cerastes cornutus', 72 | 67: 'diamondback\n diamondback rattlesnake\n Crotalus adamanteus', 73 | 68: 'sidewinder\n horned rattlesnake\n Crotalus cerastes', 74 | 69: 'trilobite', 75 | 70: 'harvestman\n daddy longlegs\n Phalangium opilio', 76 | 71: 'scorpion', 77 | 72: 'black and gold garden spider\n Argiope aurantia', 78 | 73: 'barn spider\n Araneus cavaticus', 79 | 74: 'garden spider\n Aranea diademata', 80 | 75: 'black widow\n Latrodectus mactans', 81 | 76: 'tarantula', 82 | 77: 'wolf spider\n hunting spider', 83 | 78: 'tick', 84 | 79: 'centipede', 85 | 80: 'black grouse', 86 | 81: 'ptarmigan', 87 | 82: 'ruffed grouse\n partridge\n Bonasa umbellus', 88 | 83: 'prairie chicken\n prairie grouse\n prairie fowl', 89 | 84: 'peacock', 90 | 85: 'quail', 91 | 86: 'partridge', 92 | 87: 'African grey\n African gray\n Psittacus erithacus', 93 | 88: 'macaw', 94 | 89: 'sulphur-crested cockatoo\n Kakatoe galerita\n Cacatua galerita', 95 | 90: 'lorikeet', 96 | 91: 'coucal', 97 | 92: 'bee eater', 98 | 93: 'hornbill', 99 | 94: 'hummingbird', 100 | 95: 'jacamar', 101 | 96: 'toucan', 102 | 97: 'drake', 103 | 98: 'red-breasted merganser\n Mergus serrator', 104 | 99: 
'goose', 105 | 100: 'black swan\n Cygnus atratus', 106 | 101: 'tusker', 107 | 102: 'echidna\n spiny anteater\n anteater', 108 | 103: 'platypus\n duckbill\n duckbilled platypus\n duck-billed platypus\n Ornithorhynchus anatinus', 109 | 104: 'wallaby\n brush kangaroo', 110 | 105: 'koala\n koala bear\n kangaroo bear\n native bear\n Phascolarctos cinereus', 111 | 106: 'wombat', 112 | 107: 'jellyfish', 113 | 108: 'sea anemone\n anemone', 114 | 109: 'brain coral', 115 | 110: 'flatworm\n platyhelminth', 116 | 111: 'nematode\n nematode worm\n roundworm', 117 | 112: 'conch', 118 | 113: 'snail', 119 | 114: 'slug', 120 | 115: 'sea slug\n nudibranch', 121 | 116: 'chiton\n coat-of-mail shell\n sea cradle\n polyplacophore', 122 | 117: 'chambered nautilus\n pearly nautilus\n nautilus', 123 | 118: 'Dungeness crab\n Cancer magister', 124 | 119: 'rock crab\n Cancer irroratus', 125 | 120: 'fiddler crab', 126 | 121: 'king crab\n Alaska crab\n Alaskan king crab\n Alaska king crab\n Paralithodes camtschatica', 127 | 122: 'American lobster\n Northern lobster\n Maine lobster\n Homarus americanus', 128 | 123: 'spiny lobster\n langouste\n rock lobster\n crawfish\n crayfish\n sea crawfish', 129 | 124: 'crayfish\n crawfish\n crawdad\n crawdaddy', 130 | 125: 'hermit crab', 131 | 126: 'isopod', 132 | 127: 'white stork\n Ciconia ciconia', 133 | 128: 'black stork\n Ciconia nigra', 134 | 129: 'spoonbill', 135 | 130: 'flamingo', 136 | 131: 'little blue heron\n Egretta caerulea', 137 | 132: 'American egret\n great white heron\n Egretta albus', 138 | 133: 'bittern', 139 | 134: 'crane', 140 | 135: 'limpkin\n Aramus pictus', 141 | 136: 'European gallinule\n Porphyrio porphyrio', 142 | 137: 'American coot\n marsh hen\n mud hen\n water hen\n Fulica americana', 143 | 138: 'bustard', 144 | 139: 'ruddy turnstone\n Arenaria interpres', 145 | 140: 'red-backed sandpiper\n dunlin\n Erolia alpina', 146 | 141: 'redshank\n Tringa totanus', 147 | 142: 'dowitcher', 148 | 143: 'oystercatcher\n oyster catcher', 149 | 144: 'pelican', 150 | 145: 'king penguin\n Aptenodytes patagonica', 151 | 146: 'albatross\n mollymawk', 152 | 147: 'grey whale\n gray whale\n devilfish\n Eschrichtius gibbosus\n Eschrichtius robustus', 153 | 148: 'killer whale\n killer\n orca\n grampus\n sea wolf\n Orcinus orca', 154 | 149: 'dugong\n Dugong dugon', 155 | 150: 'sea lion', 156 | 151: 'Chihuahua', 157 | 152: 'Japanese spaniel', 158 | 153: 'Maltese dog\n Maltese terrier\n Maltese', 159 | 154: 'Pekinese\n Pekingese\n Peke', 160 | 155: 'Shih-Tzu', 161 | 156: 'Blenheim spaniel', 162 | 157: 'papillon', 163 | 158: 'toy terrier', 164 | 159: 'Rhodesian ridgeback', 165 | 160: 'Afghan hound\n Afghan', 166 | 161: 'basset\n basset hound', 167 | 162: 'beagle', 168 | 163: 'bloodhound\n sleuthhound', 169 | 164: 'bluetick', 170 | 165: 'black-and-tan coonhound', 171 | 166: 'Walker hound\n Walker foxhound', 172 | 167: 'English foxhound', 173 | 168: 'redbone', 174 | 169: 'borzoi\n Russian wolfhound', 175 | 170: 'Irish wolfhound', 176 | 171: 'Italian greyhound', 177 | 172: 'whippet', 178 | 173: 'Ibizan hound\n Ibizan Podenco', 179 | 174: 'Norwegian elkhound\n elkhound', 180 | 175: 'otterhound\n otter hound', 181 | 176: 'Saluki\n gazelle hound', 182 | 177: 'Scottish deerhound\n deerhound', 183 | 178: 'Weimaraner', 184 | 179: 'Staffordshire bullterrier\n Staffordshire bull terrier', 185 | 180: 'American Staffordshire terrier\n Staffordshire terrier\n American pit bull terrier\n pit bull terrier', 186 | 181: 'Bedlington terrier', 187 | 182: 'Border terrier', 188 | 183: 'Kerry blue 
terrier', 189 | 184: 'Irish terrier', 190 | 185: 'Norfolk terrier', 191 | 186: 'Norwich terrier', 192 | 187: 'Yorkshire terrier', 193 | 188: 'wire-haired fox terrier', 194 | 189: 'Lakeland terrier', 195 | 190: 'Sealyham terrier\n Sealyham', 196 | 191: 'Airedale\n Airedale terrier', 197 | 192: 'cairn\n cairn terrier', 198 | 193: 'Australian terrier', 199 | 194: 'Dandie Dinmont\n Dandie Dinmont terrier', 200 | 195: 'Boston bull\n Boston terrier', 201 | 196: 'miniature schnauzer', 202 | 197: 'giant schnauzer', 203 | 198: 'standard schnauzer', 204 | 199: 'Scotch terrier\n Scottish terrier\n Scottie', 205 | 200: 'Tibetan terrier\n chrysanthemum dog', 206 | 201: 'silky terrier\n Sydney silky', 207 | 202: 'soft-coated wheaten terrier', 208 | 203: 'West Highland white terrier', 209 | 204: 'Lhasa\n Lhasa apso', 210 | 205: 'flat-coated retriever', 211 | 206: 'curly-coated retriever', 212 | 207: 'golden retriever', 213 | 208: 'Labrador retriever', 214 | 209: 'Chesapeake Bay retriever', 215 | 210: 'German short-haired pointer', 216 | 211: 'vizsla\n Hungarian pointer', 217 | 212: 'English setter', 218 | 213: 'Irish setter\n red setter', 219 | 214: 'Gordon setter', 220 | 215: 'Brittany spaniel', 221 | 216: 'clumber\n clumber spaniel', 222 | 217: 'English springer\n English springer spaniel', 223 | 218: 'Welsh springer spaniel', 224 | 219: 'cocker spaniel\n English cocker spaniel\n cocker', 225 | 220: 'Sussex spaniel', 226 | 221: 'Irish water spaniel', 227 | 222: 'kuvasz', 228 | 223: 'schipperke', 229 | 224: 'groenendael', 230 | 225: 'malinois', 231 | 226: 'briard', 232 | 227: 'kelpie', 233 | 228: 'komondor', 234 | 229: 'Old English sheepdog\n bobtail', 235 | 230: 'Shetland sheepdog\n Shetland sheep dog\n Shetland', 236 | 231: 'collie', 237 | 232: 'Border collie', 238 | 233: 'Bouvier des Flandres\n Bouviers des Flandres', 239 | 234: 'Rottweiler', 240 | 235: 'German shepherd\n German shepherd dog\n German police dog\n alsatian', 241 | 236: 'Doberman\n Doberman pinscher', 242 | 237: 'miniature pinscher', 243 | 238: 'Greater Swiss Mountain dog', 244 | 239: 'Bernese mountain dog', 245 | 240: 'Appenzeller', 246 | 241: 'EntleBucher', 247 | 242: 'boxer', 248 | 243: 'bull mastiff', 249 | 244: 'Tibetan mastiff', 250 | 245: 'French bulldog', 251 | 246: 'Great Dane', 252 | 247: 'Saint Bernard\n St Bernard', 253 | 248: 'Eskimo dog\n husky', 254 | 249: 'malamute\n malemute\n Alaskan malamute', 255 | 250: 'Siberian husky', 256 | 251: 'dalmatian\n coach dog\n carriage dog', 257 | 252: 'affenpinscher\n monkey pinscher\n monkey dog', 258 | 253: 'basenji', 259 | 254: 'pug\n pug-dog', 260 | 255: 'Leonberg', 261 | 256: 'Newfoundland\n Newfoundland dog', 262 | 257: 'Great Pyrenees', 263 | 258: 'Samoyed\n Samoyede', 264 | 259: 'Pomeranian', 265 | 260: 'chow\n chow chow', 266 | 261: 'keeshond', 267 | 262: 'Brabancon griffon', 268 | 263: 'Pembroke\n Pembroke Welsh corgi', 269 | 264: 'Cardigan\n Cardigan Welsh corgi', 270 | 265: 'toy poodle', 271 | 266: 'miniature poodle', 272 | 267: 'standard poodle', 273 | 268: 'Mexican hairless', 274 | 269: 'timber wolf\n grey wolf\n gray wolf\n Canis lupus', 275 | 270: 'white wolf\n Arctic wolf\n Canis lupus tundrarum', 276 | 271: 'red wolf\n maned wolf\n Canis rufus\n Canis niger', 277 | 272: 'coyote\n prairie wolf\n brush wolf\n Canis latrans', 278 | 273: 'dingo\n warrigal\n warragal\n Canis dingo', 279 | 274: 'dhole\n Cuon alpinus', 280 | 275: 'African hunting dog\n hyena dog\n Cape hunting dog\n Lycaon pictus', 281 | 276: 'hyena\n hyaena', 282 | 277: 'red fox\n Vulpes vulpes', 283 | 278: 
'kit fox\n Vulpes macrotis', 284 | 279: 'Arctic fox\n white fox\n Alopex lagopus', 285 | 280: 'grey fox\n gray fox\n Urocyon cinereoargenteus', 286 | 281: 'tabby\n tabby cat', 287 | 282: 'tiger cat', 288 | 283: 'Persian cat', 289 | 284: 'Siamese cat\n Siamese', 290 | 285: 'Egyptian cat', 291 | 286: 'cougar\n puma\n catamount\n mountain lion\n painter\n panther\n Felis concolor', 292 | 287: 'lynx\n catamount', 293 | 288: 'leopard\n Panthera pardus', 294 | 289: 'snow leopard\n ounce\n Panthera uncia', 295 | 290: 'jaguar\n panther\n Panthera onca\n Felis onca', 296 | 291: 'lion\n king of beasts\n Panthera leo', 297 | 292: 'tiger\n Panthera tigris', 298 | 293: 'cheetah\n chetah\n Acinonyx jubatus', 299 | 294: 'brown bear\n bruin\n Ursus arctos', 300 | 295: 'American black bear\n black bear\n Ursus americanus\n Euarctos americanus', 301 | 296: 'ice bear\n polar bear\n Ursus Maritimus\n Thalarctos maritimus', 302 | 297: 'sloth bear\n Melursus ursinus\n Ursus ursinus', 303 | 298: 'mongoose', 304 | 299: 'meerkat\n mierkat', 305 | 300: 'tiger beetle', 306 | 301: 'ladybug\n ladybeetle\n lady beetle\n ladybird\n ladybird beetle', 307 | 302: 'ground beetle\n carabid beetle', 308 | 303: 'long-horned beetle\n longicorn\n longicorn beetle', 309 | 304: 'leaf beetle\n chrysomelid', 310 | 305: 'dung beetle', 311 | 306: 'rhinoceros beetle', 312 | 307: 'weevil', 313 | 308: 'fly', 314 | 309: 'bee', 315 | 310: 'ant\n emmet\n pismire', 316 | 311: 'grasshopper\n hopper', 317 | 312: 'cricket', 318 | 313: 'walking stick\n walkingstick\n stick insect', 319 | 314: 'cockroach\n roach', 320 | 315: 'mantis\n mantid', 321 | 316: 'cicada\n cicala', 322 | 317: 'leafhopper', 323 | 318: 'lacewing\n lacewing fly', 324 | 319: "dragonfly\n darning needle\n devil's darning needle\n sewing needle\n snake feeder\n snake doctor\n mosquito hawk\n skeeter hawk", 325 | 320: 'damselfly', 326 | 321: 'admiral', 327 | 322: 'ringlet\n ringlet butterfly', 328 | 323: 'monarch\n monarch butterfly\n milkweed butterfly\n Danaus plexippus', 329 | 324: 'cabbage butterfly', 330 | 325: 'sulphur butterfly\n sulfur butterfly', 331 | 326: 'lycaenid\n lycaenid butterfly', 332 | 327: 'starfish\n sea star', 333 | 328: 'sea urchin', 334 | 329: 'sea cucumber\n holothurian', 335 | 330: 'wood rabbit\n cottontail\n cottontail rabbit', 336 | 331: 'hare', 337 | 332: 'Angora\n Angora rabbit', 338 | 333: 'hamster', 339 | 334: 'porcupine\n hedgehog', 340 | 335: 'fox squirrel\n eastern fox squirrel\n Sciurus niger', 341 | 336: 'marmot', 342 | 337: 'beaver', 343 | 338: 'guinea pig\n Cavia cobaya', 344 | 339: 'sorrel', 345 | 340: 'zebra', 346 | 341: 'hog\n pig\n grunter\n squealer\n Sus scrofa', 347 | 342: 'wild boar\n boar\n Sus scrofa', 348 | 343: 'warthog', 349 | 344: 'hippopotamus\n hippo\n river horse\n Hippopotamus amphibius', 350 | 345: 'ox', 351 | 346: 'water buffalo\n water ox\n Asiatic buffalo\n Bubalus bubalis', 352 | 347: 'bison', 353 | 348: 'ram\n tup', 354 | 349: 'bighorn\n bighorn sheep\n cimarron\n Rocky Mountain bighorn\n Rocky Mountain sheep\n Ovis canadensis', 355 | 350: 'ibex\n Capra ibex', 356 | 351: 'hartebeest', 357 | 352: 'impala\n Aepyceros melampus', 358 | 353: 'gazelle', 359 | 354: 'Arabian camel\n dromedary\n Camelus dromedarius', 360 | 355: 'llama', 361 | 356: 'weasel', 362 | 357: 'mink', 363 | 358: 'polecat\n fitch\n foulmart\n foumart\n Mustela putorius', 364 | 359: 'black-footed ferret\n ferret\n Mustela nigripes', 365 | 360: 'otter', 366 | 361: 'skunk\n polecat\n wood pussy', 367 | 362: 'badger', 368 | 363: 'armadillo', 369 | 364: 
'three-toed sloth\n ai\n Bradypus tridactylus', 370 | 365: 'orangutan\n orang\n orangutang\n Pongo pygmaeus', 371 | 366: 'gorilla\n Gorilla gorilla', 372 | 367: 'chimpanzee\n chimp\n Pan troglodytes', 373 | 368: 'gibbon\n Hylobates lar', 374 | 369: 'siamang\n Hylobates syndactylus\n Symphalangus syndactylus', 375 | 370: 'guenon\n guenon monkey', 376 | 371: 'patas\n hussar monkey\n Erythrocebus patas', 377 | 372: 'baboon', 378 | 373: 'macaque', 379 | 374: 'langur', 380 | 375: 'colobus\n colobus monkey', 381 | 376: 'proboscis monkey\n Nasalis larvatus', 382 | 377: 'marmoset', 383 | 378: 'capuchin\n ringtail\n Cebus capucinus', 384 | 379: 'howler monkey\n howler', 385 | 380: 'titi\n titi monkey', 386 | 381: 'spider monkey\n Ateles geoffroyi', 387 | 382: 'squirrel monkey\n Saimiri sciureus', 388 | 383: 'Madagascar cat\n ring-tailed lemur\n Lemur catta', 389 | 384: 'indri\n indris\n Indri indri\n Indri brevicaudatus', 390 | 385: 'Indian elephant\n Elephas maximus', 391 | 386: 'African elephant\n Loxodonta africana', 392 | 387: 'lesser panda\n red panda\n panda\n bear cat\n cat bear\n Ailurus fulgens', 393 | 388: 'giant panda\n panda\n panda bear\n coon bear\n Ailuropoda melanoleuca', 394 | 389: 'barracouta\n snoek', 395 | 390: 'eel', 396 | 391: 'coho\n cohoe\n coho salmon\n blue jack\n silver salmon\n Oncorhynchus kisutch', 397 | 392: 'rock beauty\n Holocanthus tricolor', 398 | 393: 'anemone fish', 399 | 394: 'sturgeon', 400 | 395: 'gar\n garfish\n garpike\n billfish\n Lepisosteus osseus', 401 | 396: 'lionfish', 402 | 397: 'puffer\n pufferfish\n blowfish\n globefish', 403 | 398: 'abacus', 404 | 399: 'abaya', 405 | 400: "academic gown\n academic robe\n judge's robe", 406 | 401: 'accordion\n piano accordion\n squeeze box', 407 | 402: 'acoustic guitar', 408 | 403: 'aircraft carrier\n carrier\n flattop\n attack aircraft carrier', 409 | 404: 'airliner', 410 | 405: 'airship\n dirigible', 411 | 406: 'altar', 412 | 407: 'ambulance', 413 | 408: 'amphibian\n amphibious vehicle', 414 | 409: 'analog clock', 415 | 410: 'apiary\n bee house', 416 | 411: 'apron', 417 | 412: 'ashcan\n trash can\n garbage can\n wastebin\n ash bin\n ash-bin\n ashbin\n dustbin\n trash barrel\n trash bin', 418 | 413: 'assault rifle\n assault gun', 419 | 414: 'backpack\n back pack\n knapsack\n packsack\n rucksack\n haversack', 420 | 415: 'bakery\n bakeshop\n bakehouse', 421 | 416: 'balance beam\n beam', 422 | 417: 'balloon', 423 | 418: 'ballpoint\n ballpoint pen\n ballpen\n Biro', 424 | 419: 'Band Aid', 425 | 420: 'banjo', 426 | 421: 'bannister\n banister\n balustrade\n balusters\n handrail', 427 | 422: 'barbell', 428 | 423: 'barber chair', 429 | 424: 'barbershop', 430 | 425: 'barn', 431 | 426: 'barometer', 432 | 427: 'barrel\n cask', 433 | 428: 'barrow\n garden cart\n lawn cart\n wheelbarrow', 434 | 429: 'baseball', 435 | 430: 'basketball', 436 | 431: 'bassinet', 437 | 432: 'bassoon', 438 | 433: 'bathing cap\n swimming cap', 439 | 434: 'bath towel', 440 | 435: 'bathtub\n bathing tub\n bath\n tub', 441 | 436: 'beach wagon\n station wagon\n wagon\n estate car\n beach waggon\n station waggon\n waggon', 442 | 437: 'beacon\n lighthouse\n beacon light\n pharos', 443 | 438: 'beaker', 444 | 439: 'bearskin\n busby\n shako', 445 | 440: 'beer bottle', 446 | 441: 'beer glass', 447 | 442: 'bell cote\n bell cot', 448 | 443: 'bib', 449 | 444: 'bicycle-built-for-two\n tandem bicycle\n tandem', 450 | 445: 'bikini\n two-piece', 451 | 446: 'binder\n ring-binder', 452 | 447: 'binoculars\n field glasses\n opera glasses', 453 | 448: 'birdhouse', 454 | 
449: 'boathouse', 455 | 450: 'bobsled\n bobsleigh\n bob', 456 | 451: 'bolo tie\n bolo\n bola tie\n bola', 457 | 452: 'bonnet\n poke bonnet', 458 | 453: 'bookcase', 459 | 454: 'bookshop\n bookstore\n bookstall', 460 | 455: 'bottlecap', 461 | 456: 'bow', 462 | 457: 'bow tie\n bow-tie\n bowtie', 463 | 458: 'brass\n memorial tablet\n plaque', 464 | 459: 'brassiere\n bra\n bandeau', 465 | 460: 'breakwater\n groin\n groyne\n mole\n bulwark\n seawall\n jetty', 466 | 461: 'breastplate\n aegis\n egis', 467 | 462: 'broom', 468 | 463: 'bucket\n pail', 469 | 464: 'buckle', 470 | 465: 'bulletproof vest', 471 | 466: 'bullet train\n bullet', 472 | 467: 'butcher shop\n meat market', 473 | 468: 'cab\n hack\n taxi\n taxicab', 474 | 469: 'caldron\n cauldron', 475 | 470: 'candle\n taper\n wax light', 476 | 471: 'cannon', 477 | 472: 'canoe', 478 | 473: 'can opener\n tin opener', 479 | 474: 'cardigan', 480 | 475: 'car mirror', 481 | 476: 'carousel\n carrousel\n merry-go-round\n roundabout\n whirligig', 482 | 477: "carpenter's kit\n tool kit", 483 | 478: 'carton', 484 | 479: 'car wheel', 485 | 480: 'cash machine\n cash dispenser\n automated teller machine\n automatic teller machine\n automated teller\n automatic teller\n ATM', 486 | 481: 'cassette', 487 | 482: 'cassette player', 488 | 483: 'castle', 489 | 484: 'catamaran', 490 | 485: 'CD player', 491 | 486: 'cello\n violoncello', 492 | 487: 'cellular telephone\n cellular phone\n cellphone\n cell\n mobile phone', 493 | 488: 'chain', 494 | 489: 'chainlink fence', 495 | 490: 'chain mail\n ring mail\n mail\n chain armor\n chain armour\n ring armor\n ring armour', 496 | 491: 'chain saw\n chainsaw', 497 | 492: 'chest', 498 | 493: 'chiffonier\n commode', 499 | 494: 'chime\n bell\n gong', 500 | 495: 'china cabinet\n china closet', 501 | 496: 'Christmas stocking', 502 | 497: 'church\n church building', 503 | 498: 'cinema\n movie theater\n movie theatre\n movie house\n picture palace', 504 | 499: 'cleaver\n meat cleaver\n chopper', 505 | 500: 'cliff dwelling', 506 | 501: 'cloak', 507 | 502: 'clog\n geta\n patten\n sabot', 508 | 503: 'cocktail shaker', 509 | 504: 'coffee mug', 510 | 505: 'coffeepot', 511 | 506: 'coil\n spiral\n volute\n whorl\n helix', 512 | 507: 'combination lock', 513 | 508: 'computer keyboard\n keypad', 514 | 509: 'confectionery\n confectionary\n candy store', 515 | 510: 'container ship\n containership\n container vessel', 516 | 511: 'convertible', 517 | 512: 'corkscrew\n bottle screw', 518 | 513: 'cornet\n horn\n trumpet\n trump', 519 | 514: 'cowboy boot', 520 | 515: 'cowboy hat\n ten-gallon hat', 521 | 516: 'cradle', 522 | 517: 'crane', 523 | 518: 'crash helmet', 524 | 519: 'crate', 525 | 520: 'crib\n cot', 526 | 521: 'Crock Pot', 527 | 522: 'croquet ball', 528 | 523: 'crutch', 529 | 524: 'cuirass', 530 | 525: 'dam\n dike\n dyke', 531 | 526: 'desk', 532 | 527: 'desktop computer', 533 | 528: 'dial telephone\n dial phone', 534 | 529: 'diaper\n nappy\n napkin', 535 | 530: 'digital clock', 536 | 531: 'digital watch', 537 | 532: 'dining table\n board', 538 | 533: 'dishrag\n dishcloth', 539 | 534: 'dishwasher\n dish washer\n dishwashing machine', 540 | 535: 'disk brake\n disc brake', 541 | 536: 'dock\n dockage\n docking facility', 542 | 537: 'dogsled\n dog sled\n dog sleigh', 543 | 538: 'dome', 544 | 539: 'doormat\n welcome mat', 545 | 540: 'drilling platform\n offshore rig', 546 | 541: 'drum\n membranophone\n tympan', 547 | 542: 'drumstick', 548 | 543: 'dumbbell', 549 | 544: 'Dutch oven', 550 | 545: 'electric fan\n blower', 551 | 546: 'electric guitar', 
552 | 547: 'electric locomotive', 553 | 548: 'entertainment center', 554 | 549: 'envelope', 555 | 550: 'espresso maker', 556 | 551: 'face powder', 557 | 552: 'feather boa\n boa', 558 | 553: 'file\n file cabinet\n filing cabinet', 559 | 554: 'fireboat', 560 | 555: 'fire engine\n fire truck', 561 | 556: 'fire screen\n fireguard', 562 | 557: 'flagpole\n flagstaff', 563 | 558: 'flute\n transverse flute', 564 | 559: 'folding chair', 565 | 560: 'football helmet', 566 | 561: 'forklift', 567 | 562: 'fountain', 568 | 563: 'fountain pen', 569 | 564: 'four-poster', 570 | 565: 'freight car', 571 | 566: 'French horn\n horn', 572 | 567: 'frying pan\n frypan\n skillet', 573 | 568: 'fur coat', 574 | 569: 'garbage truck\n dustcart', 575 | 570: 'gasmask\n respirator\n gas helmet', 576 | 571: 'gas pump\n gasoline pump\n petrol pump\n island dispenser', 577 | 572: 'goblet', 578 | 573: 'go-kart', 579 | 574: 'golf ball', 580 | 575: 'golfcart\n golf cart', 581 | 576: 'gondola', 582 | 577: 'gong\n tam-tam', 583 | 578: 'gown', 584 | 579: 'grand piano\n grand', 585 | 580: 'greenhouse\n nursery\n glasshouse', 586 | 581: 'grille\n radiator grille', 587 | 582: 'grocery store\n grocery\n food market\n market', 588 | 583: 'guillotine', 589 | 584: 'hair slide', 590 | 585: 'hair spray', 591 | 586: 'half track', 592 | 587: 'hammer', 593 | 588: 'hamper', 594 | 589: 'hand blower\n blow dryer\n blow drier\n hair dryer\n hair drier', 595 | 590: 'hand-held computer\n hand-held microcomputer', 596 | 591: 'handkerchief\n hankie\n hanky\n hankey', 597 | 592: 'hard disc\n hard disk\n fixed disk', 598 | 593: 'harmonica\n mouth organ\n harp\n mouth harp', 599 | 594: 'harp', 600 | 595: 'harvester\n reaper', 601 | 596: 'hatchet', 602 | 597: 'holster', 603 | 598: 'home theater\n home theatre', 604 | 599: 'honeycomb', 605 | 600: 'hook\n claw', 606 | 601: 'hoopskirt\n crinoline', 607 | 602: 'horizontal bar\n high bar', 608 | 603: 'horse cart\n horse-cart', 609 | 604: 'hourglass', 610 | 605: 'iPod', 611 | 606: 'iron\n smoothing iron', 612 | 607: "jack-o'-lantern", 613 | 608: 'jean\n blue jean\n denim', 614 | 609: 'jeep\n landrover', 615 | 610: 'jersey\n T-shirt\n tee shirt', 616 | 611: 'jigsaw puzzle', 617 | 612: 'jinrikisha\n ricksha\n rickshaw', 618 | 613: 'joystick', 619 | 614: 'kimono', 620 | 615: 'knee pad', 621 | 616: 'knot', 622 | 617: 'lab coat\n laboratory coat', 623 | 618: 'ladle', 624 | 619: 'lampshade\n lamp shade', 625 | 620: 'laptop\n laptop computer', 626 | 621: 'lawn mower\n mower', 627 | 622: 'lens cap\n lens cover', 628 | 623: 'letter opener\n paper knife\n paperknife', 629 | 624: 'library', 630 | 625: 'lifeboat', 631 | 626: 'lighter\n light\n igniter\n ignitor', 632 | 627: 'limousine\n limo', 633 | 628: 'liner\n ocean liner', 634 | 629: 'lipstick\n lip rouge', 635 | 630: 'Loafer', 636 | 631: 'lotion', 637 | 632: 'loudspeaker\n speaker\n speaker unit\n loudspeaker system\n speaker system', 638 | 633: "loupe\n jeweler's loupe", 639 | 634: 'lumbermill\n sawmill', 640 | 635: 'magnetic compass', 641 | 636: 'mailbag\n postbag', 642 | 637: 'mailbox\n letter box', 643 | 638: 'maillot', 644 | 639: 'maillot\n tank suit', 645 | 640: 'manhole cover', 646 | 641: 'maraca', 647 | 642: 'marimba\n xylophone', 648 | 643: 'mask', 649 | 644: 'matchstick', 650 | 645: 'maypole', 651 | 646: 'maze\n labyrinth', 652 | 647: 'measuring cup', 653 | 648: 'medicine chest\n medicine cabinet', 654 | 649: 'megalith\n megalithic structure', 655 | 650: 'microphone\n mike', 656 | 651: 'microwave\n microwave oven', 657 | 652: 'military uniform', 658 | 653: 
'milk can', 659 | 654: 'minibus', 660 | 655: 'miniskirt\n mini', 661 | 656: 'minivan', 662 | 657: 'missile', 663 | 658: 'mitten', 664 | 659: 'mixing bowl', 665 | 660: 'mobile home\n manufactured home', 666 | 661: 'Model T', 667 | 662: 'modem', 668 | 663: 'monastery', 669 | 664: 'monitor', 670 | 665: 'moped', 671 | 666: 'mortar', 672 | 667: 'mortarboard', 673 | 668: 'mosque', 674 | 669: 'mosquito net', 675 | 670: 'motor scooter\n scooter', 676 | 671: 'mountain bike\n all-terrain bike\n off-roader', 677 | 672: 'mountain tent', 678 | 673: 'mouse\n computer mouse', 679 | 674: 'mousetrap', 680 | 675: 'moving van', 681 | 676: 'muzzle', 682 | 677: 'nail', 683 | 678: 'neck brace', 684 | 679: 'necklace', 685 | 680: 'nipple', 686 | 681: 'notebook\n notebook computer', 687 | 682: 'obelisk', 688 | 683: 'oboe\n hautboy\n hautbois', 689 | 684: 'ocarina\n sweet potato', 690 | 685: 'odometer\n hodometer\n mileometer\n milometer', 691 | 686: 'oil filter', 692 | 687: 'organ\n pipe organ', 693 | 688: 'oscilloscope\n scope\n cathode-ray oscilloscope\n CRO', 694 | 689: 'overskirt', 695 | 690: 'oxcart', 696 | 691: 'oxygen mask', 697 | 692: 'packet', 698 | 693: 'paddle\n boat paddle', 699 | 694: 'paddlewheel\n paddle wheel', 700 | 695: 'padlock', 701 | 696: 'paintbrush', 702 | 697: "pajama\n pyjama\n pj's\n jammies", 703 | 698: 'palace', 704 | 699: 'panpipe\n pandean pipe\n syrinx', 705 | 700: 'paper towel', 706 | 701: 'parachute\n chute', 707 | 702: 'parallel bars\n bars', 708 | 703: 'park bench', 709 | 704: 'parking meter', 710 | 705: 'passenger car\n coach\n carriage', 711 | 706: 'patio\n terrace', 712 | 707: 'pay-phone\n pay-station', 713 | 708: 'pedestal\n plinth\n footstall', 714 | 709: 'pencil box\n pencil case', 715 | 710: 'pencil sharpener', 716 | 711: 'perfume\n essence', 717 | 712: 'Petri dish', 718 | 713: 'photocopier', 719 | 714: 'pick\n plectrum\n plectron', 720 | 715: 'pickelhaube', 721 | 716: 'picket fence\n paling', 722 | 717: 'pickup\n pickup truck', 723 | 718: 'pier', 724 | 719: 'piggy bank\n penny bank', 725 | 720: 'pill bottle', 726 | 721: 'pillow', 727 | 722: 'ping-pong ball', 728 | 723: 'pinwheel', 729 | 724: 'pirate\n pirate ship', 730 | 725: 'pitcher\n ewer', 731 | 726: "plane\n carpenter's plane\n woodworking plane", 732 | 727: 'planetarium', 733 | 728: 'plastic bag', 734 | 729: 'plate rack', 735 | 730: 'plow\n plough', 736 | 731: "plunger\n plumber's helper", 737 | 732: 'Polaroid camera\n Polaroid Land camera', 738 | 733: 'pole', 739 | 734: 'police van\n police wagon\n paddy wagon\n patrol wagon\n wagon\n black Maria', 740 | 735: 'poncho', 741 | 736: 'pool table\n billiard table\n snooker table', 742 | 737: 'pop bottle\n soda bottle', 743 | 738: 'pot\n flowerpot', 744 | 739: "potter's wheel", 745 | 740: 'power drill', 746 | 741: 'prayer rug\n prayer mat', 747 | 742: 'printer', 748 | 743: 'prison\n prison house', 749 | 744: 'projectile\n missile', 750 | 745: 'projector', 751 | 746: 'puck\n hockey puck', 752 | 747: 'punching bag\n punch bag\n punching ball\n punchball', 753 | 748: 'purse', 754 | 749: 'quill\n quill pen', 755 | 750: 'quilt\n comforter\n comfort\n puff', 756 | 751: 'racer\n race car\n racing car', 757 | 752: 'racket\n racquet', 758 | 753: 'radiator', 759 | 754: 'radio\n wireless', 760 | 755: 'radio telescope\n radio reflector', 761 | 756: 'rain barrel', 762 | 757: 'recreational vehicle\n RV\n R.V.', 763 | 758: 'reel', 764 | 759: 'reflex camera', 765 | 760: 'refrigerator\n icebox', 766 | 761: 'remote control\n remote', 767 | 762: 'restaurant\n eating house\n eating place\n 
eatery', 768 | 763: 'revolver\n six-gun\n six-shooter', 769 | 764: 'rifle', 770 | 765: 'rocking chair\n rocker', 771 | 766: 'rotisserie', 772 | 767: 'rubber eraser\n rubber\n pencil eraser', 773 | 768: 'rugby ball', 774 | 769: 'rule\n ruler', 775 | 770: 'running shoe', 776 | 771: 'safe', 777 | 772: 'safety pin', 778 | 773: 'saltshaker\n salt shaker', 779 | 774: 'sandal', 780 | 775: 'sarong', 781 | 776: 'sax\n saxophone', 782 | 777: 'scabbard', 783 | 778: 'scale\n weighing machine', 784 | 779: 'school bus', 785 | 780: 'schooner', 786 | 781: 'scoreboard', 787 | 782: 'screen\n CRT screen', 788 | 783: 'screw', 789 | 784: 'screwdriver', 790 | 785: 'seat belt\n seatbelt', 791 | 786: 'sewing machine', 792 | 787: 'shield\n buckler', 793 | 788: 'shoe shop\n shoe-shop\n shoe store', 794 | 789: 'shoji', 795 | 790: 'shopping basket', 796 | 791: 'shopping cart', 797 | 792: 'shovel', 798 | 793: 'shower cap', 799 | 794: 'shower curtain', 800 | 795: 'ski', 801 | 796: 'ski mask', 802 | 797: 'sleeping bag', 803 | 798: 'slide rule\n slipstick', 804 | 799: 'sliding door', 805 | 800: 'slot\n one-armed bandit', 806 | 801: 'snorkel', 807 | 802: 'snowmobile', 808 | 803: 'snowplow\n snowplough', 809 | 804: 'soap dispenser', 810 | 805: 'soccer ball', 811 | 806: 'sock', 812 | 807: 'solar dish\n solar collector\n solar furnace', 813 | 808: 'sombrero', 814 | 809: 'soup bowl', 815 | 810: 'space bar', 816 | 811: 'space heater', 817 | 812: 'space shuttle', 818 | 813: 'spatula', 819 | 814: 'speedboat', 820 | 815: "spider web\n spider's web", 821 | 816: 'spindle', 822 | 817: 'sports car\n sport car', 823 | 818: 'spotlight\n spot', 824 | 819: 'stage', 825 | 820: 'steam locomotive', 826 | 821: 'steel arch bridge', 827 | 822: 'steel drum', 828 | 823: 'stethoscope', 829 | 824: 'stole', 830 | 825: 'stone wall', 831 | 826: 'stopwatch\n stop watch', 832 | 827: 'stove', 833 | 828: 'strainer', 834 | 829: 'streetcar\n tram\n tramcar\n trolley\n trolley car', 835 | 830: 'stretcher', 836 | 831: 'studio couch\n day bed', 837 | 832: 'stupa\n tope', 838 | 833: 'submarine\n pigboat\n sub\n U-boat', 839 | 834: 'suit\n suit of clothes', 840 | 835: 'sundial', 841 | 836: 'sunglass', 842 | 837: 'sunglasses\n dark glasses\n shades', 843 | 838: 'sunscreen\n sunblock\n sun blocker', 844 | 839: 'suspension bridge', 845 | 840: 'swab\n swob\n mop', 846 | 841: 'sweatshirt', 847 | 842: 'swimming trunks\n bathing trunks', 848 | 843: 'swing', 849 | 844: 'switch\n electric switch\n electrical switch', 850 | 845: 'syringe', 851 | 846: 'table lamp', 852 | 847: 'tank\n army tank\n armored combat vehicle\n armoured combat vehicle', 853 | 848: 'tape player', 854 | 849: 'teapot', 855 | 850: 'teddy\n teddy bear', 856 | 851: 'television\n television system', 857 | 852: 'tennis ball', 858 | 853: 'thatch\n thatched roof', 859 | 854: 'theater curtain\n theatre curtain', 860 | 855: 'thimble', 861 | 856: 'thresher\n thrasher\n threshing machine', 862 | 857: 'throne', 863 | 858: 'tile roof', 864 | 859: 'toaster', 865 | 860: 'tobacco shop\n tobacconist shop\n tobacconist', 866 | 861: 'toilet seat', 867 | 862: 'torch', 868 | 863: 'totem pole', 869 | 864: 'tow truck\n tow car\n wrecker', 870 | 865: 'toyshop', 871 | 866: 'tractor', 872 | 867: 'trailer truck\n tractor trailer\n trucking rig\n rig\n articulated lorry\n semi', 873 | 868: 'tray', 874 | 869: 'trench coat', 875 | 870: 'tricycle\n trike\n velocipede', 876 | 871: 'trimaran', 877 | 872: 'tripod', 878 | 873: 'triumphal arch', 879 | 874: 'trolleybus\n trolley coach\n trackless trolley', 880 | 875: 'trombone', 881 | 
876: 'tub\n vat', 882 | 877: 'turnstile', 883 | 878: 'typewriter keyboard', 884 | 879: 'umbrella', 885 | 880: 'unicycle\n monocycle', 886 | 881: 'upright\n upright piano', 887 | 882: 'vacuum\n vacuum cleaner', 888 | 883: 'vase', 889 | 884: 'vault', 890 | 885: 'velvet', 891 | 886: 'vending machine', 892 | 887: 'vestment', 893 | 888: 'viaduct', 894 | 889: 'violin\n fiddle', 895 | 890: 'volleyball', 896 | 891: 'waffle iron', 897 | 892: 'wall clock', 898 | 893: 'wallet\n billfold\n notecase\n pocketbook', 899 | 894: 'wardrobe\n closet\n press', 900 | 895: 'warplane\n military plane', 901 | 896: 'washbasin\n handbasin\n washbowl\n lavabo\n wash-hand basin', 902 | 897: 'washer\n automatic washer\n washing machine', 903 | 898: 'water bottle', 904 | 899: 'water jug', 905 | 900: 'water tower', 906 | 901: 'whiskey jug', 907 | 902: 'whistle', 908 | 903: 'wig', 909 | 904: 'window screen', 910 | 905: 'window shade', 911 | 906: 'Windsor tie', 912 | 907: 'wine bottle', 913 | 908: 'wing', 914 | 909: 'wok', 915 | 910: 'wooden spoon', 916 | 911: 'wool\n woolen\n woollen', 917 | 912: 'worm fence\n snake fence\n snake-rail fence\n Virginia fence', 918 | 913: 'wreck', 919 | 914: 'yawl', 920 | 915: 'yurt', 921 | 916: 'web site\n website\n internet site\n site', 922 | 917: 'comic book', 923 | 918: 'crossword puzzle\n crossword', 924 | 919: 'street sign', 925 | 920: 'traffic light\n traffic signal\n stoplight', 926 | 921: 'book jacket\n dust cover\n dust jacket\n dust wrapper', 927 | 922: 'menu', 928 | 923: 'plate', 929 | 924: 'guacamole', 930 | 925: 'consomme', 931 | 926: 'hot pot\n hotpot', 932 | 927: 'trifle', 933 | 928: 'ice cream\n icecream', 934 | 929: 'ice lolly\n lolly\n lollipop\n popsicle', 935 | 930: 'French loaf', 936 | 931: 'bagel\n beigel', 937 | 932: 'pretzel', 938 | 933: 'cheeseburger', 939 | 934: 'hotdog\n hot dog\n red hot', 940 | 935: 'mashed potato', 941 | 936: 'head cabbage', 942 | 937: 'broccoli', 943 | 938: 'cauliflower', 944 | 939: 'zucchini\n courgette', 945 | 940: 'spaghetti squash', 946 | 941: 'acorn squash', 947 | 942: 'butternut squash', 948 | 943: 'cucumber\n cuke', 949 | 944: 'artichoke\n globe artichoke', 950 | 945: 'bell pepper', 951 | 946: 'cardoon', 952 | 947: 'mushroom', 953 | 948: 'Granny Smith', 954 | 949: 'strawberry', 955 | 950: 'orange', 956 | 951: 'lemon', 957 | 952: 'fig', 958 | 953: 'pineapple\n ananas', 959 | 954: 'banana', 960 | 955: 'jackfruit\n jak\n jack', 961 | 956: 'custard apple', 962 | 957: 'pomegranate', 963 | 958: 'hay', 964 | 959: 'carbonara', 965 | 960: 'chocolate sauce\n chocolate syrup', 966 | 961: 'dough', 967 | 962: 'meat loaf\n meatloaf', 968 | 963: 'pizza\n pizza pie', 969 | 964: 'potpie', 970 | 965: 'burrito', 971 | 966: 'red wine', 972 | 967: 'espresso', 973 | 968: 'cup', 974 | 969: 'eggnog', 975 | 970: 'alp', 976 | 971: 'bubble', 977 | 972: 'cliff\n drop\n drop-off', 978 | 973: 'coral reef', 979 | 974: 'geyser', 980 | 975: 'lakeside\n lakeshore', 981 | 976: 'promontory\n headland\n head\n foreland', 982 | 977: 'sandbar\n sand bar', 983 | 978: 'seashore\n coast\n seacoast\n sea-coast', 984 | 979: 'valley\n vale', 985 | 980: 'volcano', 986 | 981: 'ballplayer\n baseball player', 987 | 982: 'groom\n bridegroom', 988 | 983: 'scuba diver', 989 | 984: 'rapeseed', 990 | 985: 'daisy', 991 | 986: "yellow lady's slipper\n yellow lady-slipper\n Cypripedium calceolus\n Cypripedium parviflorum", 992 | 987: 'corn', 993 | 988: 'acorn', 994 | 989: 'hip\n rose hip\n rosehip', 995 | 990: 'buckeye\n horse chestnut\n conker', 996 | 991: 'coral fungus', 997 | 992: 
'agaric', 998 | 993: 'gyromitra', 999 | 994: 'stinkhorn\n carrion fungus', 1000 | 995: 'earthstar', 1001 | 996: 'hen-of-the-woods\n hen of the woods\n Polyporus frondosus\n Grifola frondosa', 1002 | 997: 'bolete', 1003 | 998: 'ear\n spike\n capitulum', 1004 | 999: 'toilet tissue\n toilet paper\n bathroom tissue'} 1005 | -------------------------------------------------------------------------------- /vgg/app.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | import numpy as np 3 | import tensorflow as tf 4 | import matplotlib.pyplot as plt 5 | import vgg16 6 | import utils 7 | from Nclasses import labels 8 | 9 | img_path = raw_input('Input the path and image name:') 10 | img_ready = utils.load_image(img_path) 11 | 12 | fig=plt.figure(u"Top-5 prediction results") 13 | 14 | with tf.Session() as sess: 15 | images = tf.placeholder(tf.float32, [1, 224, 224, 3]) 16 | vgg = vgg16.Vgg16() 17 | vgg.forward(images) 18 | probability = sess.run(vgg.prob, feed_dict={images:img_ready}) 19 | top5 = np.argsort(probability[0])[-1:-6:-1] 20 | print "top5:",top5 21 | values = [] 22 | bar_label = [] 23 | for n, i in enumerate(top5): 24 | print "n:",n 25 | print "i:",i 26 | values.append(probability[0][i]) 27 | bar_label.append(labels[i]) 28 | print i, ":", labels[i], "----", utils.percent(probability[0][i]) 29 | 30 | ax = fig.add_subplot(111) 31 | ax.bar(range(len(values)), values, tick_label=bar_label, width=0.5, fc='g') 32 | ax.set_ylabel(u'probability') 33 | ax.set_title(u'Top-5') 34 | for a,b in zip(range(len(values)), values): 35 | ax.text(a, b+0.0005, utils.percent(b), ha='center', va = 'bottom', fontsize=7) 36 | plt.show() 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /vgg/pic/a.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/vgg/pic/a.jpg -------------------------------------------------------------------------------- /vgg/pic/b.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/vgg/pic/b.jpg -------------------------------------------------------------------------------- /vgg/pic/c.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/vgg/pic/c.jpg -------------------------------------------------------------------------------- /vgg/pic/d.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cj0012/AI-Practice-Tensorflow-Notes/0c0db64a4ff88774023ed74541e70eed922cc88e/vgg/pic/d.jpg -------------------------------------------------------------------------------- /vgg/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding:utf-8 3 | from skimage import io, transform 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | import tensorflow as tf 7 | from pylab import mpl 8 | 9 | mpl.rcParams['font.sans-serif']=['SimHei'] # show Chinese labels correctly 10 | mpl.rcParams['axes.unicode_minus']=False # show plus/minus signs correctly 11 | 12 | def load_image(path): 13 | fig = plt.figure("Centre and Resize") 14 | img = io.imread(path) 15 | img = img / 255.0 16 | 17 | ax0 = 
fig.add_subplot(131) 18 | ax0.set_xlabel(u'Original Picture') 19 | ax0.imshow(img) 20 | 21 | short_edge = min(img.shape[:2]) 22 | y = (img.shape[0] - short_edge) / 2 23 | x = (img.shape[1] - short_edge) / 2 24 | crop_img = img[y:y+short_edge, x:x+short_edge] 25 | 26 | ax1 = fig.add_subplot(132) 27 | ax1.set_xlabel(u"Centre Picture") 28 | ax1.imshow(crop_img) 29 | 30 | re_img = transform.resize(crop_img, (224, 224)) 31 | 32 | ax2 = fig.add_subplot(133) 33 | ax2.set_xlabel(u"Resize Picture") 34 | ax2.imshow(re_img) 35 | 36 | img_ready = re_img.reshape((1, 224, 224, 3)) 37 | 38 | return img_ready 39 | 40 | def percent(value): 41 | return '%.2f%%' % (value * 100) 42 | 43 | -------------------------------------------------------------------------------- /vgg/vgg16.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding:utf-8 3 | 4 | import inspect 5 | import os 6 | import numpy as np 7 | import tensorflow as tf 8 | import time 9 | import matplotlib.pyplot as plt 10 | 11 | VGG_MEAN = [103.939, 116.779, 123.68] 12 | 13 | class Vgg16(): 14 | def __init__(self, vgg16_path=None): 15 | if vgg16_path is None: 16 | vgg16_path = os.path.join(os.getcwd(), "vgg16.npy") 17 | self.data_dict = np.load(vgg16_path, encoding='latin1').item() 18 | 19 | def forward(self, images): 20 | 21 | print("build model started") 22 | start_time = time.time() 23 | rgb_scaled = images * 255.0 24 | red, green, blue = tf.split(rgb_scaled,3,3) 25 | bgr = tf.concat([ 26 | blue - VGG_MEAN[0], 27 | green - VGG_MEAN[1], 28 | red - VGG_MEAN[2]],3) 29 | 30 | self.conv1_1 = self.conv_layer(bgr, "conv1_1") 31 | self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2") 32 | self.pool1 = self.max_pool_2x2(self.conv1_2, "pool1") 33 | 34 | self.conv2_1 = self.conv_layer(self.pool1, "conv2_1") 35 | self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2") 36 | self.pool2 = self.max_pool_2x2(self.conv2_2, "pool2") 37 | 38 | self.conv3_1 = self.conv_layer(self.pool2, "conv3_1") 39 | self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2") 40 | self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3") 41 | self.pool3 = self.max_pool_2x2(self.conv3_3, "pool3") 42 | 43 | self.conv4_1 = self.conv_layer(self.pool3, "conv4_1") 44 | self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2") 45 | self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3") 46 | self.pool4 = self.max_pool_2x2(self.conv4_3, "pool4") 47 | 48 | self.conv5_1 = self.conv_layer(self.pool4, "conv5_1") 49 | self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2") 50 | self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3") 51 | self.pool5 = self.max_pool_2x2(self.conv5_3, "pool5") 52 | 53 | self.fc6 = self.fc_layer(self.pool5, "fc6") 54 | self.relu6 = tf.nn.relu(self.fc6) 55 | 56 | self.fc7 = self.fc_layer(self.relu6, "fc7") 57 | self.relu7 = tf.nn.relu(self.fc7) 58 | 59 | self.fc8 = self.fc_layer(self.relu7, "fc8") 60 | self.prob = tf.nn.softmax(self.fc8, name="prob") 61 | 62 | end_time = time.time() 63 | print(("time consuming: %f" % (end_time-start_time))) 64 | 65 | self.data_dict = None 66 | 67 | def conv_layer(self, x, name): 68 | with tf.variable_scope(name): 69 | w = self.get_conv_filter(name) 70 | conv = tf.nn.conv2d(x, w, [1, 1, 1, 1], padding='SAME') 71 | conv_biases = self.get_bias(name) 72 | result = tf.nn.relu(tf.nn.bias_add(conv, conv_biases)) 73 | return result 74 | 75 | def get_conv_filter(self, name): 76 | return tf.constant(self.data_dict[name][0], name="filter") 77 | 78 | def get_bias(self, name): 79 | return 
tf.constant(self.data_dict[name][1], name="biases") 80 | 81 | def max_pool_2x2(self, x, name): 82 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name) 83 | 84 | def fc_layer(self, x, name): 85 | with tf.variable_scope(name): 86 | shape = x.get_shape().as_list() 87 | dim = 1 88 | for i in shape[1:]: 89 | dim *= i 90 | x = tf.reshape(x, [-1, dim]) 91 | w = self.get_fc_weight(name) 92 | b = self.get_bias(name) 93 | 94 | result = tf.nn.bias_add(tf.matmul(x, w), b) 95 | return result 96 | 97 | def get_fc_weight(self, name): 98 | return tf.constant(self.data_dict[name][0], name="weights") 99 | 100 | --------------------------------------------------------------------------------
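The three vgg files above form one inference pipeline: utils.load_image reads an image, center-crops it to a square, resizes it to 224x224 and returns a (1, 224, 224, 3) array; vgg16.Vgg16 loads the pretrained parameters from vgg16.npy, and forward builds the sixteen weight layers ending in the softmax node prob; app.py then runs vgg.prob on the prepared image and plots the five most probable ImageNet labels. The fragment below is a minimal, non-interactive sketch of the same flow, not a file from the repository: it assumes vgg16.npy sits in the working directory (the default location Vgg16() looks in) and classifies the bundled test picture pic/a.jpg instead of prompting for a path.

#coding:utf-8
# Hypothetical quick check (not part of the repo): classify one bundled picture
# with the pretrained VGG16 weights and print the single most likely label.
import numpy as np
import tensorflow as tf
import vgg16
import utils
from Nclasses import labels

img_ready = utils.load_image('pic/a.jpg')      # center-crop + resize to (1, 224, 224, 3); also draws the preview figure

with tf.Session() as sess:
    images = tf.placeholder(tf.float32, [1, 224, 224, 3])
    vgg = vgg16.Vgg16()                        # reads vgg16.npy from the working directory
    vgg.forward(images)                        # builds conv1_1 ... fc8 and the softmax node prob
    prob = sess.run(vgg.prob, feed_dict={images: img_ready})
    top1 = np.argmax(prob[0])
    print top1, ":", labels[top1], "----", utils.percent(prob[0][top1])

Because the weights arrive as a plain dictionary of numpy arrays, every layer is created with tf.constant rather than tf.Variable, so the graph is inference-only and nothing has to be restored from a checkpoint before running it.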