├── .gitignore ├── 1_Introduction ├── basic_operations.py └── helloworld.py ├── 2_BasicModels ├── linear_regression.py ├── logistic_regression.py └── nearest_neighbor.py ├── 3_NeuralNetworks ├── autoencoder.py ├── bidirectional_rnn.py ├── convolutional_network.py ├── dynamic_rnn.py ├── multilayer_perceptron.py └── recurrent_network.py ├── 4_Utils ├── save_restore_model.py ├── tensorboard_advanced.py └── tensorboard_basic.py ├── 5_MultiGPU └── multigpu_basics.py ├── LICENSE ├── README.md ├── input_data.py └── tensorboard └── mnist_tensorboard.py /.gitignore: -------------------------------------------------------------------------------- 1 | .floydexpt 2 | .floydignore -------------------------------------------------------------------------------- /1_Introduction/basic_operations.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Basic Operations example using TensorFlow library. 3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | ''' 7 | 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | 12 | # Basic constant operations 13 | # The value returned by the constructor represents the output 14 | # of the Constant op. 15 | a = tf.constant(2) 16 | b = tf.constant(3) 17 | 18 | # Launch the default graph. 19 | with tf.Session() as sess: 20 | print("a=2, b=3") 21 | print("Addition with constants: %i" % sess.run(a+b)) 22 | print("Multiplication with constants: %i" % sess.run(a*b)) 23 | 24 | # Basic Operations with variable as graph input 25 | # The value returned by the constructor represents the output 26 | # of the Variable op. (define as input when running session) 27 | # tf Graph input 28 | a = tf.placeholder(tf.int16) 29 | b = tf.placeholder(tf.int16) 30 | 31 | # Define some operations 32 | add = tf.add(a, b) 33 | mul = tf.multiply(a, b) 34 | 35 | # Launch the default graph. 36 | with tf.Session() as sess: 37 | # Run every operation with variable input 38 | print("Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3})) 39 | print("Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3})) 40 | 41 | 42 | # ---------------- 43 | # More in details: 44 | # Matrix Multiplication from TensorFlow official tutorial 45 | 46 | # Create a Constant op that produces a 1x2 matrix. The op is 47 | # added as a node to the default graph. 48 | # 49 | # The value returned by the constructor represents the output 50 | # of the Constant op. 51 | matrix1 = tf.constant([[3., 3.]]) 52 | 53 | # Create another Constant that produces a 2x1 matrix. 54 | matrix2 = tf.constant([[2.],[2.]]) 55 | 56 | # Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs. 57 | # The returned value, 'product', represents the result of the matrix 58 | # multiplication. 59 | product = tf.matmul(matrix1, matrix2) 60 | 61 | # To run the matmul op we call the session 'run()' method, passing 'product' 62 | # which represents the output of the matmul op. This indicates to the call 63 | # that we want to get the output of the matmul op back. 64 | # 65 | # All inputs needed by the op are run automatically by the session. They 66 | # typically are run in parallel. 67 | # 68 | # The call 'run(product)' thus causes the execution of threes ops in the 69 | # graph: the two constants and matmul. 70 | # 71 | # The output of the op is returned in 'result' as a numpy `ndarray` object. 
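# For reference, the expected value here is:
#   [[3., 3.]] x [[2.], [2.]] = [[3*2 + 3*2]] = [[12.]]
# so 'result' is a 1x1 numpy ndarray containing 12.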
72 | with tf.Session() as sess: 73 | result = sess.run(product) 74 | print(result) 75 | # ==> [[ 12.]] 76 | -------------------------------------------------------------------------------- /1_Introduction/helloworld.py: -------------------------------------------------------------------------------- 1 | ''' 2 | HelloWorld example using TensorFlow library. 3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | ''' 7 | 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | 12 | # Simple hello world using TensorFlow 13 | 14 | # Create a Constant op 15 | # The op is added as a node to the default graph. 16 | # 17 | # The value returned by the constructor represents the output 18 | # of the Constant op. 19 | hello = tf.constant('Hello, TensorFlow!') 20 | 21 | # Start tf session 22 | sess = tf.Session() 23 | 24 | # Run the op 25 | print(sess.run(hello)) 26 | -------------------------------------------------------------------------------- /2_BasicModels/linear_regression.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A linear regression learning algorithm example using TensorFlow library. 3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | ''' 7 | 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | import numpy 12 | import matplotlib.pyplot as plt 13 | rng = numpy.random 14 | 15 | # Parameters 16 | learning_rate = 0.01 17 | training_epochs = 1000 18 | display_step = 50 19 | 20 | # Training Data 21 | train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167, 22 | 7.042,10.791,5.313,7.997,5.654,9.27,3.1]) 23 | train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 24 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3]) 25 | n_samples = train_X.shape[0] 26 | 27 | # tf Graph Input 28 | X = tf.placeholder("float") 29 | Y = tf.placeholder("float") 30 | 31 | # Set model weights 32 | W = tf.Variable(rng.randn(), name="weight") 33 | b = tf.Variable(rng.randn(), name="bias") 34 | 35 | # Construct a linear model 36 | pred = tf.add(tf.multiply(X, W), b) 37 | 38 | # Mean squared error 39 | cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples) 40 | # Gradient descent 41 | # Note, minimize() knows to modify W and b because Variable objects are trainable=True by default 42 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 43 | 44 | # Initializing the variables 45 | init = tf.global_variables_initializer() 46 | 47 | # Launch the graph 48 | with tf.Session() as sess: 49 | sess.run(init) 50 | 51 | # Fit all training data 52 | for epoch in range(training_epochs): 53 | for (x, y) in zip(train_X, train_Y): 54 | sess.run(optimizer, feed_dict={X: x, Y: y}) 55 | 56 | # Display logs per epoch step 57 | if (epoch+1) % display_step == 0: 58 | c = sess.run(cost, feed_dict={X: train_X, Y:train_Y}) 59 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \ 60 | "W=", sess.run(W), "b=", sess.run(b)) 61 | 62 | print("Optimization Finished!") 63 | training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y}) 64 | print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n') 65 | 66 | # Graphic display 67 | plt.plot(train_X, train_Y, 'ro', label='Original data') 68 | plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line') 69 | plt.legend() 70 | plt.show() 71 | 72 | # Testing example, as requested (Issue #2) 73 | test_X 
= numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1]) 74 | test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03]) 75 | 76 | print("Testing... (Mean square loss Comparison)") 77 | testing_cost = sess.run( 78 | tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]), 79 | feed_dict={X: test_X, Y: test_Y}) # same function as cost above 80 | print("Testing cost=", testing_cost) 81 | print("Absolute mean square loss difference:", abs( 82 | training_cost - testing_cost)) 83 | 84 | plt.plot(test_X, test_Y, 'bo', label='Testing data') 85 | plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line') 86 | plt.legend() 87 | plt.show() 88 | -------------------------------------------------------------------------------- /2_BasicModels/logistic_regression.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A logistic regression learning algorithm example using TensorFlow library. 3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.01 20 | training_epochs = 25 21 | batch_size = 100 22 | display_step = 1 23 | 24 | # tf Graph Input 25 | x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784 26 | y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes 27 | 28 | # Set model weights 29 | W = tf.Variable(tf.zeros([784, 10])) 30 | b = tf.Variable(tf.zeros([10])) 31 | 32 | # Construct model 33 | pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax 34 | 35 | # Minimize error using cross entropy 36 | cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) 37 | # Gradient Descent 38 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 39 | 40 | # Initializing the variables 41 | init = tf.global_variables_initializer() 42 | 43 | # Launch the graph 44 | with tf.Session() as sess: 45 | sess.run(init) 46 | 47 | # Training cycle 48 | for epoch in range(training_epochs): 49 | avg_cost = 0. 
50 | total_batch = int(mnist.train.num_examples/batch_size) 51 | # Loop over all batches 52 | for i in range(total_batch): 53 | batch_xs, batch_ys = mnist.train.next_batch(batch_size) 54 | # Run optimization op (backprop) and cost op (to get loss value) 55 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, 56 | y: batch_ys}) 57 | # Compute average loss 58 | avg_cost += c / total_batch 59 | # Display logs per epoch step 60 | if (epoch+1) % display_step == 0: 61 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)) 62 | 63 | print("Optimization Finished!") 64 | 65 | # Test model 66 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 67 | # Calculate accuracy 68 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 69 | print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})) 70 | -------------------------------------------------------------------------------- /2_BasicModels/nearest_neighbor.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A nearest neighbor learning algorithm example using TensorFlow library. 3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import numpy as np 13 | import tensorflow as tf 14 | 15 | # Import MNIST data 16 | from tensorflow.examples.tutorials.mnist import input_data 17 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 18 | 19 | # In this example, we limit mnist data 20 | Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates) 21 | Xte, Yte = mnist.test.next_batch(200) #200 for testing 22 | 23 | # tf Graph Input 24 | xtr = tf.placeholder("float", [None, 784]) 25 | xte = tf.placeholder("float", [784]) 26 | 27 | # Nearest Neighbor calculation using L1 Distance 28 | # Calculate L1 Distance 29 | distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1) 30 | # Prediction: Get min distance index (Nearest neighbor) 31 | pred = tf.arg_min(distance, 0) 32 | 33 | accuracy = 0. 34 | 35 | # Initializing the variables 36 | init = tf.global_variables_initializer() 37 | 38 | # Launch the graph 39 | with tf.Session() as sess: 40 | sess.run(init) 41 | 42 | # loop over test data 43 | for i in range(len(Xte)): 44 | # Get nearest neighbor 45 | nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]}) 46 | # Get nearest neighbor class label and compare it to its true label 47 | print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \ 48 | "True Class:", np.argmax(Yte[i])) 49 | # Calculate accuracy 50 | if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]): 51 | accuracy += 1./len(Xte) 52 | print("Done!") 53 | print("Accuracy:", accuracy) 54 | -------------------------------------------------------------------------------- /3_NeuralNetworks/autoencoder.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ Auto Encoder Example. 4 | Using an auto encoder on MNIST handwritten digits. 5 | References: 6 | Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based 7 | learning applied to document recognition." Proceedings of the IEEE, 8 | 86(11):2278-2324, November 1998. 
9 | Links: 10 | [MNIST Dataset] http://yann.lecun.com/exdb/mnist/ 11 | """ 12 | from __future__ import division, print_function, absolute_import 13 | 14 | import tensorflow as tf 15 | import numpy as np 16 | import matplotlib.pyplot as plt 17 | 18 | # Import MNIST data 19 | from tensorflow.examples.tutorials.mnist import input_data 20 | mnist = input_data.read_data_sets("MNIST_data", one_hot=True) 21 | 22 | # Parameters 23 | learning_rate = 0.01 24 | training_epochs = 20 25 | batch_size = 256 26 | display_step = 1 27 | examples_to_show = 10 28 | 29 | # Network Parameters 30 | n_hidden_1 = 256 # 1st layer num features 31 | n_hidden_2 = 128 # 2nd layer num features 32 | n_input = 784 # MNIST data input (img shape: 28*28) 33 | 34 | # tf Graph input (only pictures) 35 | X = tf.placeholder("float", [None, n_input]) 36 | 37 | weights = { 38 | 'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])), 39 | 'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 40 | 'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])), 41 | 'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])), 42 | } 43 | biases = { 44 | 'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])), 45 | 'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])), 46 | 'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])), 47 | 'decoder_b2': tf.Variable(tf.random_normal([n_input])), 48 | } 49 | 50 | 51 | # Building the encoder 52 | def encoder(x): 53 | # Encoder Hidden layer with sigmoid activation #1 54 | layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), 55 | biases['encoder_b1'])) 56 | # Decoder Hidden layer with sigmoid activation #2 57 | layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), 58 | biases['encoder_b2'])) 59 | return layer_2 60 | 61 | 62 | # Building the decoder 63 | def decoder(x): 64 | # Encoder Hidden layer with sigmoid activation #1 65 | layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), 66 | biases['decoder_b1'])) 67 | # Decoder Hidden layer with sigmoid activation #2 68 | layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), 69 | biases['decoder_b2'])) 70 | return layer_2 71 | 72 | # Construct model 73 | encoder_op = encoder(X) 74 | decoder_op = decoder(encoder_op) 75 | 76 | # Prediction 77 | y_pred = decoder_op 78 | # Targets (Labels) are the input data. 
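# The autoencoder is trained to reconstruct its own input, so the mean
# squared error defined below compares X against the decoder output.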
79 | y_true = X 80 | 81 | # Define loss and optimizer, minimize the squared error 82 | cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2)) 83 | optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost) 84 | 85 | # Initializing the variables 86 | init = tf.global_variables_initializer() 87 | 88 | # Launch the graph 89 | with tf.Session() as sess: 90 | sess.run(init) 91 | total_batch = int(mnist.train.num_examples/batch_size) 92 | # Training cycle 93 | for epoch in range(training_epochs): 94 | # Loop over all batches 95 | for i in range(total_batch): 96 | batch_xs, batch_ys = mnist.train.next_batch(batch_size) 97 | # Run optimization op (backprop) and cost op (to get loss value) 98 | _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs}) 99 | # Display logs per epoch step 100 | if epoch % display_step == 0: 101 | print("Epoch:", '%04d' % (epoch+1), 102 | "cost=", "{:.9f}".format(c)) 103 | 104 | print("Optimization Finished!") 105 | 106 | # Applying encode and decode over test set 107 | encode_decode = sess.run( 108 | y_pred, feed_dict={X: mnist.test.images[:examples_to_show]}) 109 | # Compare original images with their reconstructions 110 | f, a = plt.subplots(2, 10, figsize=(10, 2)) 111 | for i in range(examples_to_show): 112 | a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28))) 113 | a[1][i].imshow(np.reshape(encode_decode[i], (28, 28))) 114 | f.show() 115 | plt.draw() 116 | plt.waitforbuttonpress() 117 | -------------------------------------------------------------------------------- /3_NeuralNetworks/bidirectional_rnn.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A Bidirectional Recurrent Neural Network (LSTM) implementation example using TensorFlow library. 3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/) 4 | Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | from tensorflow.contrib import rnn 14 | import numpy as np 15 | 16 | # Import MNIST data 17 | from tensorflow.examples.tutorials.mnist import input_data 18 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 19 | 20 | ''' 21 | To classify images using a bidirectional recurrent neural network, we consider 22 | every image row as a sequence of pixels. Because MNIST image shape is 28*28px, 23 | we will then handle 28 sequences of 28 steps for every sample. 
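At every step the forward and backward LSTM outputs (n_hidden features each)
are concatenated, which is why the output weight matrix below has 2*n_hidden rows.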
24 | ''' 25 | 26 | # Parameters 27 | learning_rate = 0.001 28 | training_iters = 100000 29 | batch_size = 128 30 | display_step = 10 31 | 32 | # Network Parameters 33 | n_input = 28 # MNIST data input (img shape: 28*28) 34 | n_steps = 28 # timesteps 35 | n_hidden = 128 # hidden layer num of features 36 | n_classes = 10 # MNIST total classes (0-9 digits) 37 | 38 | # tf Graph input 39 | x = tf.placeholder("float", [None, n_steps, n_input]) 40 | y = tf.placeholder("float", [None, n_classes]) 41 | 42 | # Define weights 43 | weights = { 44 | # Hidden layer weights => 2*n_hidden because of forward + backward cells 45 | 'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes])) 46 | } 47 | biases = { 48 | 'out': tf.Variable(tf.random_normal([n_classes])) 49 | } 50 | 51 | 52 | def BiRNN(x, weights, biases): 53 | 54 | # Prepare data shape to match `bidirectional_rnn` function requirements 55 | # Current data input shape: (batch_size, n_steps, n_input) 56 | # Required shape: 'n_steps' tensors list of shape (batch_size, n_input) 57 | 58 | # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input) 59 | x = tf.unstack(x, n_steps, 1) 60 | 61 | # Define lstm cells with tensorflow 62 | # Forward direction cell 63 | lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0) 64 | # Backward direction cell 65 | lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0) 66 | 67 | # Get lstm cell output 68 | try: 69 | outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, 70 | dtype=tf.float32) 71 | except Exception: # Old TensorFlow version only returns outputs not states 72 | outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, 73 | dtype=tf.float32) 74 | 75 | # Linear activation, using rnn inner loop last output 76 | return tf.matmul(outputs[-1], weights['out']) + biases['out'] 77 | 78 | pred = BiRNN(x, weights, biases) 79 | 80 | # Define loss and optimizer 81 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) 82 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) 83 | 84 | # Evaluate model 85 | correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1)) 86 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 87 | 88 | # Initializing the variables 89 | init = tf.global_variables_initializer() 90 | 91 | # Launch the graph 92 | with tf.Session() as sess: 93 | sess.run(init) 94 | step = 1 95 | # Keep training until reach max iterations 96 | while step * batch_size < training_iters: 97 | batch_x, batch_y = mnist.train.next_batch(batch_size) 98 | # Reshape data to get 28 seq of 28 elements 99 | batch_x = batch_x.reshape((batch_size, n_steps, n_input)) 100 | # Run optimization op (backprop) 101 | sess.run(optimizer, feed_dict={x: batch_x, y: batch_y}) 102 | if step % display_step == 0: 103 | # Calculate batch accuracy 104 | acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y}) 105 | # Calculate batch loss 106 | loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y}) 107 | print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \ 108 | "{:.6f}".format(loss) + ", Training Accuracy= " + \ 109 | "{:.5f}".format(acc)) 110 | step += 1 111 | print("Optimization Finished!") 112 | 113 | # Calculate accuracy for 128 mnist test images 114 | test_len = 128 115 | test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input)) 116 | test_label = mnist.test.labels[:test_len] 117 | print("Testing Accuracy:", \ 118 | sess.run(accuracy, feed_dict={x: test_data, y: 
test_label})) 119 | -------------------------------------------------------------------------------- /3_NeuralNetworks/convolutional_network.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A Convolutional Network implementation example using TensorFlow library. 3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.001 20 | training_iters = 200000 21 | batch_size = 128 22 | display_step = 10 23 | 24 | # Network Parameters 25 | n_input = 784 # MNIST data input (img shape: 28*28) 26 | n_classes = 10 # MNIST total classes (0-9 digits) 27 | dropout = 0.75 # Dropout, probability to keep units 28 | 29 | # tf Graph input 30 | x = tf.placeholder(tf.float32, [None, n_input]) 31 | y = tf.placeholder(tf.float32, [None, n_classes]) 32 | keep_prob = tf.placeholder(tf.float32) #dropout (keep probability) 33 | 34 | 35 | # Create some wrappers for simplicity 36 | def conv2d(x, W, b, strides=1): 37 | # Conv2D wrapper, with bias and relu activation 38 | x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') 39 | x = tf.nn.bias_add(x, b) 40 | return tf.nn.relu(x) 41 | 42 | 43 | def maxpool2d(x, k=2): 44 | # MaxPool2D wrapper 45 | return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], 46 | padding='SAME') 47 | 48 | 49 | # Create model 50 | def conv_net(x, weights, biases, dropout): 51 | # Reshape input picture 52 | x = tf.reshape(x, shape=[-1, 28, 28, 1]) 53 | 54 | # Convolution Layer 55 | conv1 = conv2d(x, weights['wc1'], biases['bc1']) 56 | # Max Pooling (down-sampling) 57 | conv1 = maxpool2d(conv1, k=2) 58 | 59 | # Convolution Layer 60 | conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) 61 | # Max Pooling (down-sampling) 62 | conv2 = maxpool2d(conv2, k=2) 63 | 64 | # Fully connected layer 65 | # Reshape conv2 output to fit fully connected layer input 66 | fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]]) 67 | fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']) 68 | fc1 = tf.nn.relu(fc1) 69 | # Apply Dropout 70 | fc1 = tf.nn.dropout(fc1, dropout) 71 | 72 | # Output, class prediction 73 | out = tf.add(tf.matmul(fc1, weights['out']), biases['out']) 74 | return out 75 | 76 | # Store layers weight & bias 77 | weights = { 78 | # 5x5 conv, 1 input, 32 outputs 79 | 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), 80 | # 5x5 conv, 32 inputs, 64 outputs 81 | 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), 82 | # fully connected, 7*7*64 inputs, 1024 outputs 83 | 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), 84 | # 1024 inputs, 10 outputs (class prediction) 85 | 'out': tf.Variable(tf.random_normal([1024, n_classes])) 86 | } 87 | 88 | biases = { 89 | 'bc1': tf.Variable(tf.random_normal([32])), 90 | 'bc2': tf.Variable(tf.random_normal([64])), 91 | 'bd1': tf.Variable(tf.random_normal([1024])), 92 | 'out': tf.Variable(tf.random_normal([n_classes])) 93 | } 94 | 95 | # Construct model 96 | pred = conv_net(x, weights, biases, keep_prob) 97 | 98 | # Define loss and optimizer 99 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, 
labels=y)) 100 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) 101 | 102 | # Evaluate model 103 | correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 104 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 105 | 106 | # Initializing the variables 107 | init = tf.global_variables_initializer() 108 | 109 | # Launch the graph 110 | with tf.Session() as sess: 111 | sess.run(init) 112 | step = 1 113 | # Keep training until reach max iterations 114 | while step * batch_size < training_iters: 115 | batch_x, batch_y = mnist.train.next_batch(batch_size) 116 | # Run optimization op (backprop) 117 | sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, 118 | keep_prob: dropout}) 119 | if step % display_step == 0: 120 | # Calculate batch loss and accuracy 121 | loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, 122 | y: batch_y, 123 | keep_prob: 1.}) 124 | print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \ 125 | "{:.6f}".format(loss) + ", Training Accuracy= " + \ 126 | "{:.5f}".format(acc)) 127 | step += 1 128 | print("Optimization Finished!") 129 | 130 | # Calculate accuracy for 256 mnist test images 131 | print("Testing Accuracy:", \ 132 | sess.run(accuracy, feed_dict={x: mnist.test.images[:256], 133 | y: mnist.test.labels[:256], 134 | keep_prob: 1.})) 135 | -------------------------------------------------------------------------------- /3_NeuralNetworks/dynamic_rnn.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A Dynamic Recurrent Neural Network (LSTM) implementation example using 3 | TensorFlow library. This example is using a toy dataset to classify linear 4 | sequences. The generated sequences have variable length. 5 | 6 | Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf 7 | 8 | Author: Aymeric Damien 9 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 10 | ''' 11 | 12 | from __future__ import print_function 13 | 14 | import tensorflow as tf 15 | import random 16 | 17 | 18 | # ==================== 19 | # TOY DATA GENERATOR 20 | # ==================== 21 | class ToySequenceData(object): 22 | """ Generate sequence of data with dynamic length. 23 | This class generate samples for training: 24 | - Class 0: linear sequences (i.e. [0, 1, 2, 3,...]) 25 | - Class 1: random sequences (i.e. [1, 3, 10, 7,...]) 26 | 27 | NOTICE: 28 | We have to pad each sequence to reach 'max_seq_len' for TensorFlow 29 | consistency (we cannot feed a numpy array with inconsistent 30 | dimensions). The dynamic calculation will then be perform thanks to 31 | 'seqlen' attribute that records every actual sequence length. 32 | """ 33 | def __init__(self, n_samples=1000, max_seq_len=20, min_seq_len=3, 34 | max_value=1000): 35 | self.data = [] 36 | self.labels = [] 37 | self.seqlen = [] 38 | for i in range(n_samples): 39 | # Random sequence length 40 | len = random.randint(min_seq_len, max_seq_len) 41 | # Monitor sequence length for TensorFlow dynamic calculation 42 | self.seqlen.append(len) 43 | # Add a random or linear int sequence (50% prob) 44 | if random.random() < .5: 45 | # Generate a linear sequence 46 | rand_start = random.randint(0, max_value - len) 47 | s = [[float(i)/max_value] for i in 48 | range(rand_start, rand_start + len)] 49 | # Pad sequence for dimension consistency 50 | s += [[0.] 
for i in range(max_seq_len - len)] 51 | self.data.append(s) 52 | self.labels.append([1., 0.]) 53 | else: 54 | # Generate a random sequence 55 | s = [[float(random.randint(0, max_value))/max_value] 56 | for i in range(len)] 57 | # Pad sequence for dimension consistency 58 | s += [[0.] for i in range(max_seq_len - len)] 59 | self.data.append(s) 60 | self.labels.append([0., 1.]) 61 | self.batch_id = 0 62 | 63 | def next(self, batch_size): 64 | """ Return a batch of data. When dataset end is reached, start over. 65 | """ 66 | if self.batch_id == len(self.data): 67 | self.batch_id = 0 68 | batch_data = (self.data[self.batch_id:min(self.batch_id + 69 | batch_size, len(self.data))]) 70 | batch_labels = (self.labels[self.batch_id:min(self.batch_id + 71 | batch_size, len(self.data))]) 72 | batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id + 73 | batch_size, len(self.data))]) 74 | self.batch_id = min(self.batch_id + batch_size, len(self.data)) 75 | return batch_data, batch_labels, batch_seqlen 76 | 77 | 78 | # ========== 79 | # MODEL 80 | # ========== 81 | 82 | # Parameters 83 | learning_rate = 0.01 84 | training_iters = 1000000 85 | batch_size = 128 86 | display_step = 10 87 | 88 | # Network Parameters 89 | seq_max_len = 20 # Sequence max length 90 | n_hidden = 64 # hidden layer num of features 91 | n_classes = 2 # linear sequence or not 92 | 93 | trainset = ToySequenceData(n_samples=1000, max_seq_len=seq_max_len) 94 | testset = ToySequenceData(n_samples=500, max_seq_len=seq_max_len) 95 | 96 | # tf Graph input 97 | x = tf.placeholder("float", [None, seq_max_len, 1]) 98 | y = tf.placeholder("float", [None, n_classes]) 99 | # A placeholder for indicating each sequence length 100 | seqlen = tf.placeholder(tf.int32, [None]) 101 | 102 | # Define weights 103 | weights = { 104 | 'out': tf.Variable(tf.random_normal([n_hidden, n_classes])) 105 | } 106 | biases = { 107 | 'out': tf.Variable(tf.random_normal([n_classes])) 108 | } 109 | 110 | 111 | def dynamicRNN(x, seqlen, weights, biases): 112 | 113 | # Prepare data shape to match `rnn` function requirements 114 | # Current data input shape: (batch_size, n_steps, n_input) 115 | # Required shape: 'n_steps' tensors list of shape (batch_size, n_input) 116 | 117 | # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input) 118 | x = tf.unstack(x, seq_max_len, 1) 119 | 120 | # Define a lstm cell with tensorflow 121 | lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden) 122 | 123 | # Get lstm cell output, providing 'sequence_length' will perform dynamic 124 | # calculation. 125 | outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32, 126 | sequence_length=seqlen) 127 | 128 | # When performing dynamic calculation, we must retrieve the last 129 | # dynamically computed output, i.e., if a sequence length is 10, we need 130 | # to retrieve the 10th output. 131 | # However TensorFlow doesn't support advanced indexing yet, so we build 132 | # a custom op that for each sample in batch size, get its length and 133 | # get the corresponding relevant output. 134 | 135 | # 'outputs' is a list of output at every timestep, we pack them in a Tensor 136 | # and change back dimension to [batch_size, n_step, n_input] 137 | outputs = tf.stack(outputs) 138 | outputs = tf.transpose(outputs, [1, 0, 2]) 139 | 140 | # Hack to build the indexing and retrieve the right output. 
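# After the transpose, 'outputs' has shape (batch_size, seq_max_len, n_hidden).
# Flattening it to (batch_size * seq_max_len, n_hidden) places the last valid
# output of sample i at row i * seq_max_len + (seqlen[i] - 1), which is the
# index computed below and selected with tf.gather.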
141 | batch_size = tf.shape(outputs)[0] 142 | # Start indices for each sample 143 | index = tf.range(0, batch_size) * seq_max_len + (seqlen - 1) 144 | # Indexing 145 | outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index) 146 | 147 | # Linear activation, using outputs computed above 148 | return tf.matmul(outputs, weights['out']) + biases['out'] 149 | 150 | pred = dynamicRNN(x, seqlen, weights, biases) 151 | 152 | # Define loss and optimizer 153 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) 154 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) 155 | 156 | # Evaluate model 157 | correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1)) 158 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 159 | 160 | # Initializing the variables 161 | init = tf.global_variables_initializer() 162 | 163 | # Launch the graph 164 | with tf.Session() as sess: 165 | sess.run(init) 166 | step = 1 167 | # Keep training until reach max iterations 168 | while step * batch_size < training_iters: 169 | batch_x, batch_y, batch_seqlen = trainset.next(batch_size) 170 | # Run optimization op (backprop) 171 | sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, 172 | seqlen: batch_seqlen}) 173 | if step % display_step == 0: 174 | # Calculate batch accuracy 175 | acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y, 176 | seqlen: batch_seqlen}) 177 | # Calculate batch loss 178 | loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y, 179 | seqlen: batch_seqlen}) 180 | print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \ 181 | "{:.6f}".format(loss) + ", Training Accuracy= " + \ 182 | "{:.5f}".format(acc)) 183 | step += 1 184 | print("Optimization Finished!") 185 | 186 | # Calculate accuracy 187 | test_data = testset.data 188 | test_label = testset.labels 189 | test_seqlen = testset.seqlen 190 | print("Testing Accuracy:", \ 191 | sess.run(accuracy, feed_dict={x: test_data, y: test_label, 192 | seqlen: test_seqlen})) 193 | -------------------------------------------------------------------------------- /3_NeuralNetworks/multilayer_perceptron.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A Multilayer Perceptron implementation example using TensorFlow library. 
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | # Import MNIST data 13 | from tensorflow.examples.tutorials.mnist import input_data 14 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 15 | 16 | import tensorflow as tf 17 | 18 | # Parameters 19 | learning_rate = 0.001 20 | training_epochs = 15 21 | batch_size = 100 22 | display_step = 1 23 | 24 | # Network Parameters 25 | n_hidden_1 = 256 # 1st layer number of features 26 | n_hidden_2 = 256 # 2nd layer number of features 27 | n_input = 784 # MNIST data input (img shape: 28*28) 28 | n_classes = 10 # MNIST total classes (0-9 digits) 29 | 30 | # tf Graph input 31 | x = tf.placeholder("float", [None, n_input]) 32 | y = tf.placeholder("float", [None, n_classes]) 33 | 34 | 35 | # Create model 36 | def multilayer_perceptron(x, weights, biases): 37 | # Hidden layer with RELU activation 38 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 39 | layer_1 = tf.nn.relu(layer_1) 40 | # Hidden layer with RELU activation 41 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 42 | layer_2 = tf.nn.relu(layer_2) 43 | # Output layer with linear activation 44 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 45 | return out_layer 46 | 47 | # Store layers weight & bias 48 | weights = { 49 | 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])), 50 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 51 | 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes])) 52 | } 53 | biases = { 54 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 55 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 56 | 'out': tf.Variable(tf.random_normal([n_classes])) 57 | } 58 | 59 | # Construct model 60 | pred = multilayer_perceptron(x, weights, biases) 61 | 62 | # Define loss and optimizer 63 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) 64 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) 65 | 66 | # Initializing the variables 67 | init = tf.global_variables_initializer() 68 | 69 | # Launch the graph 70 | with tf.Session() as sess: 71 | sess.run(init) 72 | 73 | # Training cycle 74 | for epoch in range(training_epochs): 75 | avg_cost = 0. 
76 | total_batch = int(mnist.train.num_examples/batch_size) 77 | # Loop over all batches 78 | for i in range(total_batch): 79 | batch_x, batch_y = mnist.train.next_batch(batch_size) 80 | # Run optimization op (backprop) and cost op (to get loss value) 81 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, 82 | y: batch_y}) 83 | # Compute average loss 84 | avg_cost += c / total_batch 85 | # Display logs per epoch step 86 | if epoch % display_step == 0: 87 | print("Epoch:", '%04d' % (epoch+1), "cost=", \ 88 | "{:.9f}".format(avg_cost)) 89 | print("Optimization Finished!") 90 | 91 | # Test model 92 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 93 | # Calculate accuracy 94 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 95 | print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})) 96 | -------------------------------------------------------------------------------- /3_NeuralNetworks/recurrent_network.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A Recurrent Neural Network (LSTM) implementation example using TensorFlow library. 3 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/) 4 | Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | from tensorflow.contrib import rnn 14 | 15 | # Import MNIST data 16 | from tensorflow.examples.tutorials.mnist import input_data 17 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 18 | 19 | ''' 20 | To classify images using a recurrent neural network, we consider every image 21 | row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then 22 | handle 28 sequences of 28 steps for every sample. 
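The LSTM is unrolled for 28 timesteps (one per image row) and only its final
output is passed through a linear layer to produce the 10 class scores.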
23 | ''' 24 | 25 | # Parameters 26 | learning_rate = 0.001 27 | training_iters = 100000 28 | batch_size = 128 29 | display_step = 10 30 | 31 | # Network Parameters 32 | n_input = 28 # MNIST data input (img shape: 28*28) 33 | n_steps = 28 # timesteps 34 | n_hidden = 128 # hidden layer num of features 35 | n_classes = 10 # MNIST total classes (0-9 digits) 36 | 37 | # tf Graph input 38 | x = tf.placeholder("float", [None, n_steps, n_input]) 39 | y = tf.placeholder("float", [None, n_classes]) 40 | 41 | # Define weights 42 | weights = { 43 | 'out': tf.Variable(tf.random_normal([n_hidden, n_classes])) 44 | } 45 | biases = { 46 | 'out': tf.Variable(tf.random_normal([n_classes])) 47 | } 48 | 49 | 50 | def RNN(x, weights, biases): 51 | 52 | # Prepare data shape to match `rnn` function requirements 53 | # Current data input shape: (batch_size, n_steps, n_input) 54 | # Required shape: 'n_steps' tensors list of shape (batch_size, n_input) 55 | 56 | # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input) 57 | x = tf.unstack(x, n_steps, 1) 58 | 59 | # Define a lstm cell with tensorflow 60 | lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0) 61 | 62 | # Get lstm cell output 63 | outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32) 64 | 65 | # Linear activation, using rnn inner loop last output 66 | return tf.matmul(outputs[-1], weights['out']) + biases['out'] 67 | 68 | pred = RNN(x, weights, biases) 69 | 70 | # Define loss and optimizer 71 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) 72 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) 73 | 74 | # Evaluate model 75 | correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1)) 76 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 77 | 78 | # Initializing the variables 79 | init = tf.global_variables_initializer() 80 | 81 | # Launch the graph 82 | with tf.Session() as sess: 83 | sess.run(init) 84 | step = 1 85 | # Keep training until reach max iterations 86 | while step * batch_size < training_iters: 87 | batch_x, batch_y = mnist.train.next_batch(batch_size) 88 | # Reshape data to get 28 seq of 28 elements 89 | batch_x = batch_x.reshape((batch_size, n_steps, n_input)) 90 | # Run optimization op (backprop) 91 | sess.run(optimizer, feed_dict={x: batch_x, y: batch_y}) 92 | if step % display_step == 0: 93 | # Calculate batch accuracy 94 | acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y}) 95 | # Calculate batch loss 96 | loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y}) 97 | print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \ 98 | "{:.6f}".format(loss) + ", Training Accuracy= " + \ 99 | "{:.5f}".format(acc)) 100 | step += 1 101 | print("Optimization Finished!") 102 | 103 | # Calculate accuracy for 128 mnist test images 104 | test_len = 128 105 | test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input)) 106 | test_label = mnist.test.labels[:test_len] 107 | print("Testing Accuracy:", \ 108 | sess.run(accuracy, feed_dict={x: test_data, y: test_label})) 109 | -------------------------------------------------------------------------------- /4_Utils/save_restore_model.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Save and Restore a model using TensorFlow. 
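A tf.train.Saver writes all variables to a checkpoint at model_path, and a
second session restores them to resume training.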
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | # Import MNIST data 13 | from tensorflow.examples.tutorials.mnist import input_data 14 | mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 15 | 16 | import tensorflow as tf 17 | 18 | # Parameters 19 | learning_rate = 0.001 20 | batch_size = 100 21 | display_step = 1 22 | model_path = "/tmp/model.ckpt" 23 | 24 | # Network Parameters 25 | n_hidden_1 = 256 # 1st layer number of features 26 | n_hidden_2 = 256 # 2nd layer number of features 27 | n_input = 784 # MNIST data input (img shape: 28*28) 28 | n_classes = 10 # MNIST total classes (0-9 digits) 29 | 30 | # tf Graph input 31 | x = tf.placeholder("float", [None, n_input]) 32 | y = tf.placeholder("float", [None, n_classes]) 33 | 34 | 35 | # Create model 36 | def multilayer_perceptron(x, weights, biases): 37 | # Hidden layer with RELU activation 38 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 39 | layer_1 = tf.nn.relu(layer_1) 40 | # Hidden layer with RELU activation 41 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 42 | layer_2 = tf.nn.relu(layer_2) 43 | # Output layer with linear activation 44 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 45 | return out_layer 46 | 47 | # Store layers weight & bias 48 | weights = { 49 | 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])), 50 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 51 | 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes])) 52 | } 53 | biases = { 54 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 55 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 56 | 'out': tf.Variable(tf.random_normal([n_classes])) 57 | } 58 | 59 | # Construct model 60 | pred = multilayer_perceptron(x, weights, biases) 61 | 62 | # Define loss and optimizer 63 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) 64 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) 65 | 66 | # Initializing the variables 67 | init = tf.global_variables_initializer() 68 | 69 | # 'Saver' op to save and restore all the variables 70 | saver = tf.train.Saver() 71 | 72 | # Running first session 73 | print("Starting 1st session...") 74 | with tf.Session() as sess: 75 | # Initialize variables 76 | sess.run(init) 77 | 78 | # Training cycle 79 | for epoch in range(3): 80 | avg_cost = 0. 
81 | total_batch = int(mnist.train.num_examples/batch_size) 82 | # Loop over all batches 83 | for i in range(total_batch): 84 | batch_x, batch_y = mnist.train.next_batch(batch_size) 85 | # Run optimization op (backprop) and cost op (to get loss value) 86 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, 87 | y: batch_y}) 88 | # Compute average loss 89 | avg_cost += c / total_batch 90 | # Display logs per epoch step 91 | if epoch % display_step == 0: 92 | print("Epoch:", '%04d' % (epoch+1), "cost=", \ 93 | "{:.9f}".format(avg_cost)) 94 | print("First Optimization Finished!") 95 | 96 | # Test model 97 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 98 | # Calculate accuracy 99 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 100 | print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})) 101 | 102 | # Save model weights to disk 103 | save_path = saver.save(sess, model_path) 104 | print("Model saved in file: %s" % save_path) 105 | 106 | # Running a new session 107 | print("Starting 2nd session...") 108 | with tf.Session() as sess: 109 | # Initialize variables 110 | sess.run(init) 111 | 112 | # Restore model weights from previously saved model 113 | saver.restore(sess, model_path) 114 | print("Model restored from file: %s" % save_path) 115 | 116 | # Resume training 117 | for epoch in range(7): 118 | avg_cost = 0. 119 | total_batch = int(mnist.train.num_examples / batch_size) 120 | # Loop over all batches 121 | for i in range(total_batch): 122 | batch_x, batch_y = mnist.train.next_batch(batch_size) 123 | # Run optimization op (backprop) and cost op (to get loss value) 124 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, 125 | y: batch_y}) 126 | # Compute average loss 127 | avg_cost += c / total_batch 128 | # Display logs per epoch step 129 | if epoch % display_step == 0: 130 | print("Epoch:", '%04d' % (epoch + 1), "cost=", \ 131 | "{:.9f}".format(avg_cost)) 132 | print("Second Optimization Finished!") 133 | 134 | # Test model 135 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 136 | # Calculate accuracy 137 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 138 | print("Accuracy:", accuracy.eval( 139 | {x: mnist.test.images, y: mnist.test.labels})) 140 | -------------------------------------------------------------------------------- /4_Utils/tensorboard_advanced.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Graph and Loss visualization using Tensorboard. 
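In addition to scalar loss and accuracy summaries, this version logs histograms
of weights, activations and gradients.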
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.01 20 | training_epochs = 25 21 | batch_size = 100 22 | display_step = 1 23 | logs_path = '/tmp/tensorflow_logs/example' 24 | 25 | # Network Parameters 26 | n_hidden_1 = 256 # 1st layer number of features 27 | n_hidden_2 = 256 # 2nd layer number of features 28 | n_input = 784 # MNIST data input (img shape: 28*28) 29 | n_classes = 10 # MNIST total classes (0-9 digits) 30 | 31 | # tf Graph Input 32 | # mnist data image of shape 28*28=784 33 | x = tf.placeholder(tf.float32, [None, 784], name='InputData') 34 | # 0-9 digits recognition => 10 classes 35 | y = tf.placeholder(tf.float32, [None, 10], name='LabelData') 36 | 37 | 38 | # Create model 39 | def multilayer_perceptron(x, weights, biases): 40 | # Hidden layer with RELU activation 41 | layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1']) 42 | layer_1 = tf.nn.relu(layer_1) 43 | # Create a summary to visualize the first layer ReLU activation 44 | tf.summary.histogram("relu1", layer_1) 45 | # Hidden layer with RELU activation 46 | layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2']) 47 | layer_2 = tf.nn.relu(layer_2) 48 | # Create another summary to visualize the second layer ReLU activation 49 | tf.summary.histogram("relu2", layer_2) 50 | # Output layer 51 | out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3']) 52 | return out_layer 53 | 54 | # Store layers weight & bias 55 | weights = { 56 | 'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'), 57 | 'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'), 58 | 'w3': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W3') 59 | } 60 | biases = { 61 | 'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'), 62 | 'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'), 63 | 'b3': tf.Variable(tf.random_normal([n_classes]), name='b3') 64 | } 65 | 66 | # Encapsulating all ops into scopes, making Tensorboard's Graph 67 | # Visualization more convenient 68 | with tf.name_scope('Model'): 69 | # Build model 70 | pred = multilayer_perceptron(x, weights, biases) 71 | 72 | with tf.name_scope('Loss'): 73 | # Softmax Cross entropy (cost function) 74 | loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) 75 | 76 | with tf.name_scope('SGD'): 77 | # Gradient Descent 78 | optimizer = tf.train.GradientDescentOptimizer(learning_rate) 79 | # Op to calculate every variable gradient 80 | grads = tf.gradients(loss, tf.trainable_variables()) 81 | grads = list(zip(grads, tf.trainable_variables())) 82 | # Op to update all variables according to their gradient 83 | apply_grads = optimizer.apply_gradients(grads_and_vars=grads) 84 | 85 | with tf.name_scope('Accuracy'): 86 | # Accuracy 87 | acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 88 | acc = tf.reduce_mean(tf.cast(acc, tf.float32)) 89 | 90 | # Initializing the variables 91 | init = tf.global_variables_initializer() 92 | 93 | # Create a summary to monitor cost tensor 94 | tf.summary.scalar("loss", loss) 95 | # Create a summary to monitor accuracy tensor 
96 | tf.summary.scalar("accuracy", acc) 97 | # Create summaries to visualize weights 98 | for var in tf.trainable_variables(): 99 | tf.summary.histogram(var.name, var) 100 | # Summarize all gradients 101 | for grad, var in grads: 102 | tf.summary.histogram(var.name + '/gradient', grad) 103 | # Merge all summaries into a single op 104 | merged_summary_op = tf.summary.merge_all() 105 | 106 | # Launch the graph 107 | with tf.Session() as sess: 108 | sess.run(init) 109 | 110 | # op to write logs to Tensorboard 111 | summary_writer = tf.summary.FileWriter(logs_path, 112 | graph=tf.get_default_graph()) 113 | 114 | # Training cycle 115 | for epoch in range(training_epochs): 116 | avg_cost = 0. 117 | total_batch = int(mnist.train.num_examples/batch_size) 118 | # Loop over all batches 119 | for i in range(total_batch): 120 | batch_xs, batch_ys = mnist.train.next_batch(batch_size) 121 | # Run optimization op (backprop), cost op (to get loss value) 122 | # and summary nodes 123 | _, c, summary = sess.run([apply_grads, loss, merged_summary_op], 124 | feed_dict={x: batch_xs, y: batch_ys}) 125 | # Write logs at every iteration 126 | summary_writer.add_summary(summary, epoch * total_batch + i) 127 | # Compute average loss 128 | avg_cost += c / total_batch 129 | # Display logs per epoch step 130 | if (epoch+1) % display_step == 0: 131 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)) 132 | 133 | print("Optimization Finished!") 134 | 135 | # Test model 136 | # Calculate accuracy 137 | print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels})) 138 | 139 | print("Run the command line:\n" \ 140 | "--> tensorboard --logdir=/tmp/tensorflow_logs " \ 141 | "\nThen open http://0.0.0.0:6006/ into your web browser") 142 | -------------------------------------------------------------------------------- /4_Utils/tensorboard_basic.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Graph and Loss visualization using Tensorboard. 
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.01 20 | training_epochs = 25 21 | batch_size = 100 22 | display_step = 1 23 | logs_path = '/tmp/tensorflow_logs/example' 24 | 25 | # tf Graph Input 26 | # mnist data image of shape 28*28=784 27 | x = tf.placeholder(tf.float32, [None, 784], name='InputData') 28 | # 0-9 digits recognition => 10 classes 29 | y = tf.placeholder(tf.float32, [None, 10], name='LabelData') 30 | 31 | # Set model weights 32 | W = tf.Variable(tf.zeros([784, 10]), name='Weights') 33 | b = tf.Variable(tf.zeros([10]), name='Bias') 34 | 35 | # Construct model and encapsulating all ops into scopes, making 36 | # Tensorboard's Graph visualization more convenient 37 | with tf.name_scope('Model'): 38 | # Model 39 | pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax 40 | with tf.name_scope('Loss'): 41 | # Minimize error using cross entropy 42 | cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) 43 | with tf.name_scope('SGD'): 44 | # Gradient Descent 45 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 46 | with tf.name_scope('Accuracy'): 47 | # Accuracy 48 | acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 49 | acc = tf.reduce_mean(tf.cast(acc, tf.float32)) 50 | 51 | # Initializing the variables 52 | init = tf.global_variables_initializer() 53 | 54 | # Create a summary to monitor cost tensor 55 | tf.summary.scalar("loss", cost) 56 | # Create a summary to monitor accuracy tensor 57 | tf.summary.scalar("accuracy", acc) 58 | # Merge all summaries into a single op 59 | merged_summary_op = tf.summary.merge_all() 60 | 61 | # Launch the graph 62 | with tf.Session() as sess: 63 | sess.run(init) 64 | 65 | # op to write logs to Tensorboard 66 | summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph()) 67 | 68 | # Training cycle 69 | for epoch in range(training_epochs): 70 | avg_cost = 0. 
71 | total_batch = int(mnist.train.num_examples/batch_size) 72 | # Loop over all batches 73 | for i in range(total_batch): 74 | batch_xs, batch_ys = mnist.train.next_batch(batch_size) 75 | # Run optimization op (backprop), cost op (to get loss value) 76 | # and summary nodes 77 | _, c, summary = sess.run([optimizer, cost, merged_summary_op], 78 | feed_dict={x: batch_xs, y: batch_ys}) 79 | # Write logs at every iteration 80 | summary_writer.add_summary(summary, epoch * total_batch + i) 81 | # Compute average loss 82 | avg_cost += c / total_batch 83 | # Display logs per epoch step 84 | if (epoch+1) % display_step == 0: 85 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)) 86 | 87 | print("Optimization Finished!") 88 | 89 | # Test model 90 | # Calculate accuracy 91 | print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels})) 92 | 93 | print("Run the command line:\n" \ 94 | "--> tensorboard --logdir=/tmp/tensorflow_logs " \ 95 | "\nThen open http://0.0.0.0:6006/ into your web browser") 96 | -------------------------------------------------------------------------------- /5_MultiGPU/multigpu_basics.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | ''' 3 | Basic Multi GPU computation example using TensorFlow library. 4 | 5 | Author: Aymeric Damien 6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 7 | ''' 8 | 9 | ''' 10 | This tutorial requires your machine to have 2 GPUs 11 | "/cpu:0": The CPU of your machine. 12 | "/gpu:0": The first GPU of your machine 13 | "/gpu:1": The second GPU of your machine 14 | ''' 15 | 16 | 17 | 18 | import numpy as np 19 | import tensorflow as tf 20 | import datetime 21 | 22 | # Processing Units logs 23 | log_device_placement = True 24 | 25 | # Num of multiplications to perform 26 | n = 10 27 | 28 | ''' 29 | Example: compute A^n + B^n on 2 GPUs 30 | Results on 8 cores with 2 GTX-980: 31 | * Single GPU computation time: 0:00:11.277449 32 | * Multi GPU computation time: 0:00:07.131701 33 | ''' 34 | # Create random large matrix 35 | A = np.random.rand(10000, 10000).astype('float32') 36 | B = np.random.rand(10000, 10000).astype('float32') 37 | 38 | # Create a graph to store results 39 | c1 = [] 40 | c2 = [] 41 | 42 | def matpow(M, n): 43 | if n < 1: #Abstract cases where n < 1 44 | return M 45 | else: 46 | return tf.matmul(M, matpow(M, n-1)) 47 | 48 | ''' 49 | Single GPU computing 50 | ''' 51 | with tf.device('/gpu:0'): 52 | a = tf.placeholder(tf.float32, [10000, 10000]) 53 | b = tf.placeholder(tf.float32, [10000, 10000]) 54 | # Compute A^n and B^n and store results in c1 55 | c1.append(matpow(a, n)) 56 | c1.append(matpow(b, n)) 57 | 58 | with tf.device('/cpu:0'): 59 | sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n 60 | 61 | t1_1 = datetime.datetime.now() 62 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess: 63 | # Run the op. 
64 | sess.run(sum, {a:A, b:B}) 65 | t2_1 = datetime.datetime.now() 66 | 67 | 68 | ''' 69 | Multi GPU computing 70 | ''' 71 | # GPU:0 computes A^n 72 | with tf.device('/gpu:0'): 73 | # Compute A^n and store result in c2 74 | a = tf.placeholder(tf.float32, [10000, 10000]) 75 | c2.append(matpow(a, n)) 76 | 77 | # GPU:1 computes B^n 78 | with tf.device('/gpu:1'): 79 | # Compute B^n and store result in c2 80 | b = tf.placeholder(tf.float32, [10000, 10000]) 81 | c2.append(matpow(b, n)) 82 | 83 | with tf.device('/cpu:0'): 84 | sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n 85 | 86 | t1_2 = datetime.datetime.now() 87 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess: 88 | # Run the op. 89 | sess.run(sum, {a:A, b:B}) 90 | t2_2 = datetime.datetime.now() 91 | 92 | 93 | print("Single GPU computation time: " + str(t2_1-t1_1)) 94 | print("Multi GPU computation time: " + str(t2_2-t1_2)) 95 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | 21 | All contributions by Aymeric Damien: 22 | Copyright (c) 2015, Aymeric Damien. 23 | All rights reserved. 24 | 25 | All other contributions: 26 | Copyright (c) 2015, the respective contributors. 27 | All rights reserved. 28 | 29 | Each contributor holds copyright over their respective contributions. 30 | The project versioning (Git) records all such contribution source information. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TensorFlow Examples 2 | 3 | Note: These examples are originally from [aymericdamien/TensorFlow-Examples](https://github.com/aymericdamien/TensorFlow-Examples) repository. 4 | 5 | TensorFlow Tutorial with popular machine learning algorithms implementation. This tutorial was designed for easily diving into TensorFlow, through examples. 6 | 7 | It is suitable for beginners who want to find clear and concise examples about TensorFlow. For readability, the tutorial includes both notebook and code with explanations. 
8 | 9 | Note: If you are using older TensorFlow version (before 0.12), please have a [look here](https://github.com/aymericdamien/TensorFlow-Examples/tree/0.11) 10 | 11 | ## Tutorial index 12 | 13 | #### 0 - Prerequisite 14 | - Introduction to Machine Learning ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/0_Prerequisite/ml_introduction.ipynb)) 15 | - Introduction to MNIST Dataset ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb)) 16 | 17 | #### 1 - Introduction 18 | - Hello World ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1_Introduction/helloworld.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/helloworld.py)) 19 | - Basic Operations ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1_Introduction/basic_operations.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/1_Introduction/basic_operations.py)) 20 | 21 | #### 2 - Basic Models 22 | - Nearest Neighbor ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/nearest_neighbor.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/nearest_neighbor.py)) 23 | - Linear Regression ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/linear_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression.py)) 24 | - Logistic Regression ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/logistic_regression.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/logistic_regression.py)) 25 | 26 | #### 3 - Neural Networks 27 | - Multilayer Perceptron ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/multilayer_perceptron.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/multilayer_perceptron.py)) 28 | - Convolutional Neural Network ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/convolutional_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/convolutional_network.py)) 29 | - Recurrent Neural Network (LSTM) ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/recurrent_network.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/recurrent_network.py)) 30 | - Bidirectional Recurrent Neural Network (LSTM) ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/bidirectional_rnn.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/bidirectional_rnn.py)) 31 | - Dynamic Recurrent Neural Network (LSTM) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/dynamic_rnn.py)) 32 | - AutoEncoder ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/3_NeuralNetworks/autoencoder.ipynb)) 
([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/autoencoder.py))
33 |
34 | #### 4 - Utilities
35 | - Save and Restore a model ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/4_Utils/save_restore_model.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4_Utils/save_restore_model.py))
36 | - TensorBoard - Graph and loss visualization ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/4_Utils/tensorboard_basic.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4_Utils/tensorboard_basic.py))
37 | - TensorBoard - Advanced visualization ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/4_Utils/tensorboard_advanced.py))
38 |
39 | #### 5 - Multi GPU
40 | - Basic Operations on multi-GPU ([notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5_MultiGPU/multigpu_basics.ipynb)) ([code](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/5_MultiGPU/multigpu_basics.py))
41 |
42 | ## Dataset
43 | Some examples require the MNIST dataset for training and testing. Don't worry: the dataset is downloaded automatically the first time an example runs (via input_data.py); a short usage sketch is included at the end of this README.
44 | MNIST is a database of handwritten digits; for a quick description of the dataset, see [this notebook](https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb).
45 |
46 | Official Website: [http://yann.lecun.com/exdb/mnist/](http://yann.lecun.com/exdb/mnist/)
47 |
48 | ## More Examples
49 | The following examples come from [TFLearn](https://github.com/tflearn/tflearn), a library that provides a simplified interface for TensorFlow. It ships with many [examples](https://github.com/tflearn/tflearn/tree/master/examples) and [pre-built operations and layers](http://tflearn.org/doc_index/#api).
50 |
51 | ### Tutorials
52 | - [TFLearn Quickstart](https://github.com/tflearn/tflearn/blob/master/tutorials/intro/quickstart.md). Learn the basics of TFLearn through a concrete machine learning task: build and train a deep neural network classifier.
53 |
54 | ### Basics
55 | - [Linear Regression](https://github.com/tflearn/tflearn/blob/master/examples/basics/linear_regression.py). Implement a linear regression using TFLearn.
56 | - [Logical Operators](https://github.com/tflearn/tflearn/blob/master/examples/basics/logical.py). Implement logical operators with TFLearn (also shows how to use 'merge').
57 | - [Weights Persistence](https://github.com/tflearn/tflearn/blob/master/examples/basics/weights_persistence.py). Save and restore a model.
58 | - [Fine-Tuning](https://github.com/tflearn/tflearn/blob/master/examples/basics/finetuning.py). Fine-tune a pre-trained model on a new task.
59 | - [Using HDF5](https://github.com/tflearn/tflearn/blob/master/examples/basics/use_hdf5.py). Use HDF5 to handle large datasets.
60 | - [Using DASK](https://github.com/tflearn/tflearn/blob/master/examples/basics/use_dask.py). Use DASK to handle large datasets.
61 |
62 | ### Computer Vision
63 | - [Multi-layer perceptron](https://github.com/tflearn/tflearn/blob/master/examples/images/dnn.py). A multi-layer perceptron implementation for the MNIST classification task.
64 | - [Convolutional Network (MNIST)](https://github.com/tflearn/tflearn/blob/master/examples/images/convnet_mnist.py). A convolutional neural network implementation for classifying the MNIST dataset.
65 | - [Convolutional Network (CIFAR-10)](https://github.com/tflearn/tflearn/blob/master/examples/images/convnet_cifar10.py). A convolutional neural network implementation for classifying the CIFAR-10 dataset.
66 | - [Network in Network](https://github.com/tflearn/tflearn/blob/master/examples/images/network_in_network.py). A 'Network in Network' implementation for classifying the CIFAR-10 dataset.
67 | - [Alexnet](https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py). Apply AlexNet to the Oxford Flowers 17 classification task.
68 | - [VGGNet](https://github.com/tflearn/tflearn/blob/master/examples/images/vgg_network.py). Apply a VGG network to the Oxford Flowers 17 classification task.
69 | - [VGGNet Finetuning (Fast Training)](https://github.com/tflearn/tflearn/blob/master/examples/images/vgg_network_finetuning.py). Use a pre-trained VGG network and retrain it on your own data for fast training.
70 | - [RNN Pixels](https://github.com/tflearn/tflearn/blob/master/examples/images/rnn_pixels.py). Use an RNN (over a sequence of pixels) to classify images.
71 | - [Highway Network](https://github.com/tflearn/tflearn/blob/master/examples/images/highway_dnn.py). A highway network implementation for classifying the MNIST dataset.
72 | - [Highway Convolutional Network](https://github.com/tflearn/tflearn/blob/master/examples/images/convnet_highway_mnist.py). A highway convolutional network implementation for classifying the MNIST dataset.
73 | - [Residual Network (MNIST)](https://github.com/tflearn/tflearn/blob/master/examples/images/residual_network_mnist.py). A bottleneck residual network applied to the MNIST classification task.
74 | - [Residual Network (CIFAR-10)](https://github.com/tflearn/tflearn/blob/master/examples/images/residual_network_cifar10.py). A residual network applied to the CIFAR-10 classification task.
75 | - [Google Inception (v3)](https://github.com/tflearn/tflearn/blob/master/examples/images/googlenet.py). Google's Inception v3 network applied to the Oxford Flowers 17 classification task.
76 | - [Auto Encoder](https://github.com/tflearn/tflearn/blob/master/examples/images/autoencoder.py). An autoencoder applied to MNIST handwritten digits.
77 |
78 | ### Natural Language Processing
79 | - [Recurrent Neural Network (LSTM)](https://github.com/tflearn/tflearn/blob/master/examples/nlp/lstm.py). Apply an LSTM to the IMDB sentiment classification task.
80 | - [Bi-Directional RNN (LSTM)](https://github.com/tflearn/tflearn/blob/master/examples/nlp/bidirectional_lstm.py). Apply a bi-directional LSTM to the IMDB sentiment classification task.
81 | - [Dynamic RNN (LSTM)](https://github.com/tflearn/tflearn/blob/master/examples/nlp/dynamic_lstm.py). Apply a dynamic LSTM to classify variable-length text from the IMDB dataset.
82 | - [City Name Generation](https://github.com/tflearn/tflearn/blob/master/examples/nlp/lstm_generator_cityname.py). Generate new US city names using an LSTM network.
83 | - [Shakespeare Scripts Generation](https://github.com/tflearn/tflearn/blob/master/examples/nlp/lstm_generator_shakespeare.py). Generate new Shakespeare scripts using an LSTM network.
84 | - [Seq2seq](https://github.com/tflearn/tflearn/blob/master/examples/nlp/seq2seq_example.py). A pedagogical example of a seq2seq recurrent network. See [this repo](https://github.com/ichuang/tflearn_seq2seq) for full instructions.
85 | - [CNN Seq](https://github.com/tflearn/tflearn/blob/master/examples/nlp/cnn_sentence_classification.py). Apply a 1-D convolutional network to classify sequences of words from the IMDB sentiment dataset.
86 |
87 | ### Reinforcement Learning
88 | - [Atari Pacman 1-step Q-Learning](https://github.com/tflearn/tflearn/blob/master/examples/reinforcement_learning/atari_1step_qlearning.py). Teach a machine to play Atari games (Pacman by default) using 1-step Q-learning.
89 |
90 | ### Others
91 | - [Recommender - Wide & Deep Network](https://github.com/tflearn/tflearn/blob/master/examples/others/recommender_wide_and_deep.py). A pedagogical example of wide & deep networks for recommender systems.
92 |
93 | ### Notebooks
94 | - [Spiral Classification Problem](https://github.com/tflearn/tflearn/blob/master/examples/notebooks/spiral.ipynb). A TFLearn implementation of the spiral classification problem from Stanford CS231n.
95 |
96 | ### Extending TensorFlow
97 | - [Layers](https://github.com/tflearn/tflearn/blob/master/examples/extending_tensorflow/layers.py). Use TFLearn layers along with TensorFlow.
98 | - [Trainer](https://github.com/tflearn/tflearn/blob/master/examples/extending_tensorflow/trainer.py). Use the TFLearn trainer class to train any TensorFlow graph.
99 | - [Built-in Ops](https://github.com/tflearn/tflearn/blob/master/examples/extending_tensorflow/builtin_ops.py). Use TFLearn built-in operations along with TensorFlow.
100 | - [Summaries](https://github.com/tflearn/tflearn/blob/master/examples/extending_tensorflow/summaries.py). Use TFLearn summarizers along with TensorFlow.
101 | - [Variables](https://github.com/tflearn/tflearn/blob/master/examples/extending_tensorflow/variables.py). Use TFLearn variables along with TensorFlow.
102 |
103 |
104 | ## Dependencies
105 | ```
106 | tensorflow 1.0alpha
107 | numpy
108 | matplotlib
109 | cuda
110 | tflearn (if using tflearn examples)
111 | ```
112 | Floyd takes care of making these dependencies available at run time.
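As a quick check of the MNIST download described in the Dataset section above, here is a minimal usage sketch, assuming a TensorFlow 1.x / Python 2-era environment (which is what the bundled input_data.py targets); the /tmp/data/ directory is just an example location:

```
# Download MNIST on first use, then fetch a single training batch.
import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
batch_xs, batch_ys = mnist.train.next_batch(100)
print(batch_xs.shape)  # (100, 784): flattened 28x28 images, scaled to [0.0, 1.0]
print(batch_ys.shape)  # (100, 10): one-hot digit labels
```

Most examples in this repository load the data the same way; some import the equivalent helper via `from tensorflow.examples.tutorials.mnist import input_data`.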
113 | 114 | -------------------------------------------------------------------------------- /input_data.py: -------------------------------------------------------------------------------- 1 | """Functions for downloading and reading MNIST data.""" 2 | from __future__ import print_function 3 | import gzip 4 | import os 5 | import urllib 6 | import numpy 7 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' 8 | def maybe_download(filename, work_directory): 9 | """Download the data from Yann's website, unless it's already here.""" 10 | if not os.path.exists(work_directory): 11 | os.mkdir(work_directory) 12 | filepath = os.path.join(work_directory, filename) 13 | if not os.path.exists(filepath): 14 | filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath) 15 | statinfo = os.stat(filepath) 16 | print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.') 17 | return filepath 18 | def _read32(bytestream): 19 | dt = numpy.dtype(numpy.uint32).newbyteorder('>') 20 | return numpy.frombuffer(bytestream.read(4), dtype=dt) 21 | def extract_images(filename): 22 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" 23 | print('Extracting', filename) 24 | with gzip.open(filename) as bytestream: 25 | magic = _read32(bytestream) 26 | if magic != 2051: 27 | raise ValueError( 28 | 'Invalid magic number %d in MNIST image file: %s' % 29 | (magic, filename)) 30 | num_images = _read32(bytestream) 31 | rows = _read32(bytestream) 32 | cols = _read32(bytestream) 33 | buf = bytestream.read(rows * cols * num_images) 34 | data = numpy.frombuffer(buf, dtype=numpy.uint8) 35 | data = data.reshape(num_images, rows, cols, 1) 36 | return data 37 | def dense_to_one_hot(labels_dense, num_classes=10): 38 | """Convert class labels from scalars to one-hot vectors.""" 39 | num_labels = labels_dense.shape[0] 40 | index_offset = numpy.arange(num_labels) * num_classes 41 | labels_one_hot = numpy.zeros((num_labels, num_classes)) 42 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 43 | return labels_one_hot 44 | def extract_labels(filename, one_hot=False): 45 | """Extract the labels into a 1D uint8 numpy array [index].""" 46 | print('Extracting', filename) 47 | with gzip.open(filename) as bytestream: 48 | magic = _read32(bytestream) 49 | if magic != 2049: 50 | raise ValueError( 51 | 'Invalid magic number %d in MNIST label file: %s' % 52 | (magic, filename)) 53 | num_items = _read32(bytestream) 54 | buf = bytestream.read(num_items) 55 | labels = numpy.frombuffer(buf, dtype=numpy.uint8) 56 | if one_hot: 57 | return dense_to_one_hot(labels) 58 | return labels 59 | class DataSet(object): 60 | def __init__(self, images, labels, fake_data=False): 61 | if fake_data: 62 | self._num_examples = 10000 63 | else: 64 | assert images.shape[0] == labels.shape[0], ( 65 | "images.shape: %s labels.shape: %s" % (images.shape, 66 | labels.shape)) 67 | self._num_examples = images.shape[0] 68 | # Convert shape from [num examples, rows, columns, depth] 69 | # to [num examples, rows*columns] (assuming depth == 1) 70 | assert images.shape[3] == 1 71 | images = images.reshape(images.shape[0], 72 | images.shape[1] * images.shape[2]) 73 | # Convert from [0, 255] -> [0.0, 1.0]. 
74 | images = images.astype(numpy.float32) 75 | images = numpy.multiply(images, 1.0 / 255.0) 76 | self._images = images 77 | self._labels = labels 78 | self._epochs_completed = 0 79 | self._index_in_epoch = 0 80 | @property 81 | def images(self): 82 | return self._images 83 | @property 84 | def labels(self): 85 | return self._labels 86 | @property 87 | def num_examples(self): 88 | return self._num_examples 89 | @property 90 | def epochs_completed(self): 91 | return self._epochs_completed 92 | def next_batch(self, batch_size, fake_data=False): 93 | """Return the next `batch_size` examples from this data set.""" 94 | if fake_data: 95 | fake_image = [1.0 for _ in xrange(784)] 96 | fake_label = 0 97 | return [fake_image for _ in xrange(batch_size)], [ 98 | fake_label for _ in xrange(batch_size)] 99 | start = self._index_in_epoch 100 | self._index_in_epoch += batch_size 101 | if self._index_in_epoch > self._num_examples: 102 | # Finished epoch 103 | self._epochs_completed += 1 104 | # Shuffle the data 105 | perm = numpy.arange(self._num_examples) 106 | numpy.random.shuffle(perm) 107 | self._images = self._images[perm] 108 | self._labels = self._labels[perm] 109 | # Start next epoch 110 | start = 0 111 | self._index_in_epoch = batch_size 112 | assert batch_size <= self._num_examples 113 | end = self._index_in_epoch 114 | return self._images[start:end], self._labels[start:end] 115 | def read_data_sets(train_dir, fake_data=False, one_hot=False): 116 | class DataSets(object): 117 | pass 118 | data_sets = DataSets() 119 | if fake_data: 120 | data_sets.train = DataSet([], [], fake_data=True) 121 | data_sets.validation = DataSet([], [], fake_data=True) 122 | data_sets.test = DataSet([], [], fake_data=True) 123 | return data_sets 124 | TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' 125 | TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' 126 | TEST_IMAGES = 't10k-images-idx3-ubyte.gz' 127 | TEST_LABELS = 't10k-labels-idx1-ubyte.gz' 128 | VALIDATION_SIZE = 5000 129 | local_file = maybe_download(TRAIN_IMAGES, train_dir) 130 | train_images = extract_images(local_file) 131 | local_file = maybe_download(TRAIN_LABELS, train_dir) 132 | train_labels = extract_labels(local_file, one_hot=one_hot) 133 | local_file = maybe_download(TEST_IMAGES, train_dir) 134 | test_images = extract_images(local_file) 135 | local_file = maybe_download(TEST_LABELS, train_dir) 136 | test_labels = extract_labels(local_file, one_hot=one_hot) 137 | validation_images = train_images[:VALIDATION_SIZE] 138 | validation_labels = train_labels[:VALIDATION_SIZE] 139 | train_images = train_images[VALIDATION_SIZE:] 140 | train_labels = train_labels[VALIDATION_SIZE:] 141 | data_sets.train = DataSet(train_images, train_labels) 142 | data_sets.validation = DataSet(validation_images, validation_labels) 143 | data_sets.test = DataSet(test_images, test_labels) 144 | return data_sets -------------------------------------------------------------------------------- /tensorboard/mnist_tensorboard.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the 'License'); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an 'AS IS' BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """A simple MNIST classifier which displays summaries in TensorBoard. 16 | 17 | This is an unimpressive MNIST model, but it is a good example of using 18 | tf.name_scope to make a graph legible in the TensorBoard graph explorer, and of 19 | naming summary tags so that they are grouped meaningfully in TensorBoard. 20 | 21 | It demonstrates the functionality of every TensorBoard dashboard. 22 | """ 23 | from __future__ import absolute_import 24 | from __future__ import division 25 | from __future__ import print_function 26 | 27 | import argparse 28 | import os 29 | import sys 30 | 31 | import tensorflow as tf 32 | 33 | from tensorflow.examples.tutorials.mnist import input_data 34 | 35 | FLAGS = None 36 | 37 | 38 | def train(): 39 | # Import data 40 | mnist = input_data.read_data_sets(FLAGS.data_dir, 41 | one_hot=True, 42 | fake_data=FLAGS.fake_data) 43 | 44 | sess = tf.InteractiveSession() 45 | # Create a multilayer model. 46 | 47 | # Input placeholders 48 | with tf.name_scope('input'): 49 | x = tf.placeholder(tf.float32, [None, 784], name='x-input') 50 | y_ = tf.placeholder(tf.float32, [None, 10], name='y-input') 51 | 52 | with tf.name_scope('input_reshape'): 53 | image_shaped_input = tf.reshape(x, [-1, 28, 28, 1]) 54 | tf.summary.image('input', image_shaped_input, 10) 55 | 56 | # We can't initialize these variables to 0 - the network will get stuck. 57 | def weight_variable(shape): 58 | """Create a weight variable with appropriate initialization.""" 59 | initial = tf.truncated_normal(shape, stddev=0.1) 60 | return tf.Variable(initial) 61 | 62 | def bias_variable(shape): 63 | """Create a bias variable with appropriate initialization.""" 64 | initial = tf.constant(0.1, shape=shape) 65 | return tf.Variable(initial) 66 | 67 | def variable_summaries(var): 68 | """Attach a lot of summaries to a Tensor (for TensorBoard visualization).""" 69 | with tf.name_scope('summaries'): 70 | mean = tf.reduce_mean(var) 71 | tf.summary.scalar('mean', mean) 72 | with tf.name_scope('stddev'): 73 | stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) 74 | tf.summary.scalar('stddev', stddev) 75 | tf.summary.scalar('max', tf.reduce_max(var)) 76 | tf.summary.scalar('min', tf.reduce_min(var)) 77 | tf.summary.histogram('histogram', var) 78 | 79 | def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu): 80 | """Reusable code for making a simple neural net layer. 81 | 82 | It does a matrix multiply, bias add, and then uses ReLU to nonlinearize. 83 | It also sets up name scoping so that the resultant graph is easy to read, 84 | and adds a number of summary ops. 85 | """ 86 | # Adding a name scope ensures logical grouping of the layers in the graph. 
87 | with tf.name_scope(layer_name): 88 | # This Variable will hold the state of the weights for the layer 89 | with tf.name_scope('weights'): 90 | weights = weight_variable([input_dim, output_dim]) 91 | variable_summaries(weights) 92 | with tf.name_scope('biases'): 93 | biases = bias_variable([output_dim]) 94 | variable_summaries(biases) 95 | with tf.name_scope('Wx_plus_b'): 96 | preactivate = tf.matmul(input_tensor, weights) + biases 97 | tf.summary.histogram('pre_activations', preactivate) 98 | activations = act(preactivate, name='activation') 99 | tf.summary.histogram('activations', activations) 100 | return activations 101 | 102 | hidden1 = nn_layer(x, 784, 500, 'layer1') 103 | 104 | with tf.name_scope('dropout'): 105 | keep_prob = tf.placeholder(tf.float32) 106 | tf.summary.scalar('dropout_keep_probability', keep_prob) 107 | dropped = tf.nn.dropout(hidden1, keep_prob) 108 | 109 | # Do not apply softmax activation yet, see below. 110 | y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity) 111 | 112 | with tf.name_scope('cross_entropy'): 113 | # The raw formulation of cross-entropy, 114 | # 115 | # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)), 116 | # reduction_indices=[1])) 117 | # 118 | # can be numerically unstable. 119 | # 120 | # So here we use tf.nn.softmax_cross_entropy_with_logits on the 121 | # raw outputs of the nn_layer above, and then average across 122 | # the batch. 123 | diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y) 124 | with tf.name_scope('total'): 125 | cross_entropy = tf.reduce_mean(diff) 126 | tf.summary.scalar('cross_entropy', cross_entropy) 127 | 128 | with tf.name_scope('train'): 129 | train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize( 130 | cross_entropy) 131 | 132 | with tf.name_scope('accuracy'): 133 | with tf.name_scope('correct_prediction'): 134 | correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) 135 | with tf.name_scope('accuracy'): 136 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 137 | tf.summary.scalar('accuracy', accuracy) 138 | 139 | # Merge all the summaries and write them out to 140 | # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default) 141 | merged = tf.summary.merge_all() 142 | train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph) 143 | test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test') 144 | tf.global_variables_initializer().run() 145 | 146 | # Train the model, and also write summaries. 
147 | # Every 10th step, measure test-set accuracy, and write test summaries 148 | # All other steps, run train_step on training data, & add training summaries 149 | 150 | def feed_dict(train): 151 | """Make a TensorFlow feed_dict: maps data onto Tensor placeholders.""" 152 | if train or FLAGS.fake_data: 153 | xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data) 154 | k = FLAGS.dropout 155 | else: 156 | xs, ys = mnist.test.images, mnist.test.labels 157 | k = 1.0 158 | return {x: xs, y_: ys, keep_prob: k} 159 | 160 | for i in range(FLAGS.max_steps): 161 | if i % 10 == 0: # Record summaries and test-set accuracy 162 | summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False)) 163 | test_writer.add_summary(summary, i) 164 | print('Accuracy at step %s: %s' % (i, acc)) 165 | else: # Record train set summaries, and train 166 | if i % 100 == 99: # Record execution stats 167 | run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) 168 | run_metadata = tf.RunMetadata() 169 | summary, _ = sess.run([merged, train_step], 170 | feed_dict=feed_dict(True), 171 | options=run_options, 172 | run_metadata=run_metadata) 173 | train_writer.add_run_metadata(run_metadata, 'step%03d' % i) 174 | train_writer.add_summary(summary, i) 175 | print('Adding run metadata for', i) 176 | else: # Record a summary 177 | summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True)) 178 | train_writer.add_summary(summary, i) 179 | train_writer.close() 180 | test_writer.close() 181 | 182 | 183 | def main(_): 184 | if tf.gfile.Exists(FLAGS.log_dir): 185 | tf.gfile.DeleteRecursively(FLAGS.log_dir) 186 | tf.gfile.MakeDirs(FLAGS.log_dir) 187 | train() 188 | 189 | 190 | if __name__ == '__main__': 191 | parser = argparse.ArgumentParser() 192 | parser.add_argument('--fake_data', nargs='?', const=True, type=bool, 193 | default=False, 194 | help='If true, uses fake data for unit testing.') 195 | parser.add_argument('--max_steps', type=int, default=1000, 196 | help='Number of steps to run trainer.') 197 | parser.add_argument('--learning_rate', type=float, default=0.001, 198 | help='Initial learning rate') 199 | parser.add_argument('--dropout', type=float, default=0.9, 200 | help='Keep probability for training dropout.') 201 | parser.add_argument( 202 | '--data_dir', 203 | type=str, 204 | default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'), 205 | 'tensorflow/mnist/input_data'), 206 | help='Directory for storing input data') 207 | parser.add_argument( 208 | '--log_dir', 209 | type=str, 210 | default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'), 211 | 'tensorflow/mnist/logs/mnist_with_summaries'), 212 | help='Summaries log directory') 213 | FLAGS, unparsed = parser.parse_known_args() 214 | tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) 215 | --------------------------------------------------------------------------------