├── .gitignore ├── LICENSE ├── README.md ├── examples ├── 1_Introduction │ ├── basic_eager_api.py │ ├── basic_operations.py │ └── helloworld.py ├── 2_BasicModels │ ├── gradient_boosted_decision_tree.py │ ├── kmeans.py │ ├── linear_regression.py │ ├── linear_regression_eager_api.py │ ├── logistic_regression.py │ ├── logistic_regression_eager_api.py │ ├── nearest_neighbor.py │ ├── random_forest.py │ └── word2vec.py ├── 3_NeuralNetworks │ ├── autoencoder.py │ ├── bidirectional_rnn.py │ ├── convolutional_network.py │ ├── convolutional_network_raw.py │ ├── dcgan.py │ ├── dynamic_rnn.py │ ├── gan.py │ ├── multilayer_perceptron.py │ ├── neural_network.py │ ├── neural_network_eager_api.py │ ├── neural_network_raw.py │ ├── recurrent_network.py │ └── variational_autoencoder.py ├── 4_Utils │ ├── save_restore_model.py │ ├── tensorboard_advanced.py │ └── tensorboard_basic.py ├── 5_DataManagement │ ├── build_an_image_dataset.py │ └── tensorflow_dataset_api.py ├── 6_MultiGPU │ ├── multigpu_basics.py │ └── multigpu_cnn.py └── README.md ├── input_data.py ├── notebooks ├── 0_Prerequisite │ ├── ml_introduction.ipynb │ └── mnist_dataset_intro.ipynb ├── 1_Introduction │ ├── basic_eager_api.ipynb │ ├── basic_operations.ipynb │ └── helloworld.ipynb ├── 2_BasicModels │ ├── gradient_boosted_decision_tree.ipynb │ ├── kmeans.ipynb │ ├── linear_regression.ipynb │ ├── linear_regression_eager_api.ipynb │ ├── logistic_regression.ipynb │ ├── logistic_regression_eager_api.ipynb │ ├── nearest_neighbor.ipynb │ ├── random_forest.ipynb │ └── word2vec.ipynb ├── 3_NeuralNetworks │ ├── autoencoder.ipynb │ ├── bidirectional_rnn.ipynb │ ├── convolutional_network.ipynb │ ├── convolutional_network_raw.ipynb │ ├── dcgan.ipynb │ ├── dynamic_rnn.ipynb │ ├── gan.ipynb │ ├── neural_network.ipynb │ ├── neural_network_eager_api.ipynb │ ├── neural_network_raw.ipynb │ ├── recurrent_network.ipynb │ └── variational_autoencoder.ipynb ├── 4_Utils │ ├── save_restore_model.ipynb │ ├── tensorboard_advanced.ipynb │ └── tensorboard_basic.ipynb ├── 5_DataManagement │ ├── build_an_image_dataset.ipynb │ ├── image_transformation.ipynb │ ├── load_data.ipynb │ ├── tensorflow_dataset_api.ipynb │ └── tfrecords.ipynb ├── 6_MultiGPU │ ├── multigpu_basics.ipynb │ └── multigpu_cnn.ipynb └── README.md ├── resources └── img │ ├── tensorboard_advanced_1.png │ ├── tensorboard_advanced_2.png │ ├── tensorboard_advanced_3.png │ ├── tensorboard_advanced_4.png │ ├── tensorboard_basic_1.png │ ├── tensorboard_basic_2.png │ └── tf2 │ ├── tensorboard1.png │ ├── tensorboard2.png │ ├── tensorboard3.png │ └── tensorboard4.png ├── tensorflow_v1 ├── README.md ├── examples │ ├── 1_Introduction │ │ ├── basic_eager_api.py │ │ ├── basic_operations.py │ │ └── helloworld.py │ ├── 2_BasicModels │ │ ├── gradient_boosted_decision_tree.py │ │ ├── kmeans.py │ │ ├── linear_regression.py │ │ ├── linear_regression_eager_api.py │ │ ├── logistic_regression.py │ │ ├── logistic_regression_eager_api.py │ │ ├── nearest_neighbor.py │ │ ├── random_forest.py │ │ └── word2vec.py │ ├── 3_NeuralNetworks │ │ ├── autoencoder.py │ │ ├── bidirectional_rnn.py │ │ ├── convolutional_network.py │ │ ├── convolutional_network_raw.py │ │ ├── dcgan.py │ │ ├── dynamic_rnn.py │ │ ├── gan.py │ │ ├── multilayer_perceptron.py │ │ ├── neural_network.py │ │ ├── neural_network_eager_api.py │ │ ├── neural_network_raw.py │ │ ├── recurrent_network.py │ │ └── variational_autoencoder.py │ ├── 4_Utils │ │ ├── save_restore_model.py │ │ ├── tensorboard_advanced.py │ │ └── tensorboard_basic.py │ ├── 5_DataManagement │ │ ├── 
build_an_image_dataset.py │ │ └── tensorflow_dataset_api.py │ └── 6_MultiGPU │ │ ├── multigpu_basics.py │ │ └── multigpu_cnn.py └── notebooks │ ├── 0_Prerequisite │ ├── ml_introduction.ipynb │ └── mnist_dataset_intro.ipynb │ ├── 1_Introduction │ ├── basic_eager_api.ipynb │ ├── basic_operations.ipynb │ └── helloworld.ipynb │ ├── 2_BasicModels │ ├── gradient_boosted_decision_tree.ipynb │ ├── kmeans.ipynb │ ├── linear_regression.ipynb │ ├── linear_regression_eager_api.ipynb │ ├── logistic_regression.ipynb │ ├── logistic_regression_eager_api.ipynb │ ├── nearest_neighbor.ipynb │ ├── random_forest.ipynb │ └── word2vec.ipynb │ ├── 3_NeuralNetworks │ ├── autoencoder.ipynb │ ├── bidirectional_rnn.ipynb │ ├── convolutional_network.ipynb │ ├── convolutional_network_raw.ipynb │ ├── dcgan.ipynb │ ├── dynamic_rnn.ipynb │ ├── gan.ipynb │ ├── neural_network.ipynb │ ├── neural_network_eager_api.ipynb │ ├── neural_network_raw.ipynb │ ├── recurrent_network.ipynb │ └── variational_autoencoder.ipynb │ ├── 4_Utils │ ├── save_restore_model.ipynb │ ├── tensorboard_advanced.ipynb │ └── tensorboard_basic.ipynb │ ├── 5_DataManagement │ ├── build_an_image_dataset.ipynb │ ├── image_transformation.ipynb │ ├── load_data.ipynb │ ├── tensorflow_dataset_api.ipynb │ └── tfrecords.ipynb │ └── 6_MultiGPU │ ├── multigpu_basics.ipynb │ └── multigpu_cnn.ipynb └── tensorflow_v2 ├── README.md └── notebooks ├── 0_Prerequisite ├── ml_introduction.ipynb └── mnist_dataset_intro.ipynb ├── 1_Introduction ├── basic_operations.ipynb └── helloworld.ipynb ├── 2_BasicModels ├── gradient_boosted_trees.ipynb ├── linear_regression.ipynb ├── logistic_regression.ipynb └── word2vec.ipynb ├── 3_NeuralNetworks ├── autoencoder.ipynb ├── bidirectional_rnn.ipynb ├── convolutional_network.ipynb ├── convolutional_network_raw.ipynb ├── dcgan.ipynb ├── dynamic_rnn.ipynb ├── neural_network.ipynb ├── neural_network_raw.ipynb └── recurrent_network.ipynb ├── 4_Utils ├── build_custom_layers.ipynb ├── save_restore_model.ipynb └── tensorboard.ipynb ├── 5_DataManagement ├── image_transformation.ipynb ├── load_data.ipynb └── tfrecords.ipynb └── 6_Hardware └── multigpu_training.ipynb /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .ipynb_checkpoints 3 | *.pyc 4 | __pycache__ 5 | *.egg-info 6 | build/* 7 | dist/* 8 | *~ 9 | .cache 10 | .coverage 11 | checkpoint 12 | htmlcov 13 | mnist 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | 21 | All contributions by Aymeric Damien: 22 | Copyright (c) 2015, Aymeric Damien. 23 | All rights reserved. 24 | 25 | All other contributions: 26 | Copyright (c) 2015, the respective contributors. 27 | All rights reserved. 28 | 29 | Each contributor holds copyright over their respective contributions. 30 | The project versioning (Git) records all such contribution source information. -------------------------------------------------------------------------------- /examples/1_Introduction/basic_eager_api.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Basic introduction to TensorFlow's Eager API. 3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | 7 | What is Eager API? 8 | " Eager execution is an imperative, define-by-run interface where operations are 9 | executed immediately as they are called from Python. This makes it easier to 10 | get started with TensorFlow, and can make research and development more 11 | intuitive. A vast majority of the TensorFlow API remains the same whether eager 12 | execution is enabled or not. As a result, the exact same code that constructs 13 | TensorFlow graphs (e.g. using the layers API) can be executed imperatively 14 | by using eager execution. Conversely, most models written with Eager enabled 15 | can be converted to a graph that can be further optimized and/or extracted 16 | for deployment in production without changing code. " - Rajat Monga 17 | 18 | ''' 19 | from __future__ import absolute_import, division, print_function 20 | 21 | import numpy as np 22 | import tensorflow as tf 23 | import tensorflow.contrib.eager as tfe 24 | 25 | # Set Eager API 26 | print("Setting Eager mode...") 27 | tfe.enable_eager_execution() 28 | 29 | # Define constant tensors 30 | print("Define constant tensors") 31 | a = tf.constant(2) 32 | print("a = %i" % a) 33 | b = tf.constant(3) 34 | print("b = %i" % b) 35 | 36 | # Run the operation without the need for tf.Session 37 | print("Running operations, without tf.Session") 38 | c = a + b 39 | print("a + b = %i" % c) 40 | d = a * b 41 | print("a * b = %i" % d) 42 | 43 | 44 | # Full compatibility with Numpy 45 | print("Mixing operations with Tensors and Numpy Arrays") 46 | 47 | # Define constant tensors 48 | a = tf.constant([[2., 1.], 49 | [1., 0.]], dtype=tf.float32) 50 | print("Tensor:\n a = %s" % a) 51 | b = np.array([[3., 0.], 52 | [5., 1.]], dtype=np.float32) 53 | print("NumpyArray:\n b = %s" % b) 54 | 55 | # Run the operation without the need for tf.Session 56 | print("Running operations, without tf.Session") 57 | 58 | c = a + b 59 | print("a + b = %s" % c) 60 | 61 | d = tf.matmul(a, b) 62 | print("a * b = %s" % d) 63 | 64 | print("Iterate through Tensor 'a':") 65 | for i in range(a.shape[0]): 66 | for j in range(a.shape[1]): 67 | print(a[i][j]) 68 | 69 | -------------------------------------------------------------------------------- /examples/1_Introduction/basic_operations.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Basic Operations example using TensorFlow library. 
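It demonstrates constant ops, placeholder inputs fed through feed_dict, and a small matrix multiplication, all evaluated inside a tf.Session.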
3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | ''' 7 | 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | 12 | # Basic constant operations 13 | # The value returned by the constructor represents the output 14 | # of the Constant op. 15 | a = tf.constant(2) 16 | b = tf.constant(3) 17 | 18 | # Launch the default graph. 19 | with tf.Session() as sess: 20 | print("a=2, b=3") 21 | print("Addition with constants: %i" % sess.run(a+b)) 22 | print("Multiplication with constants: %i" % sess.run(a*b)) 23 | 24 | # Basic Operations with variable as graph input 25 | # The value returned by the constructor represents the output 26 | # of the Variable op. (define as input when running session) 27 | # tf Graph input 28 | a = tf.placeholder(tf.int16) 29 | b = tf.placeholder(tf.int16) 30 | 31 | # Define some operations 32 | add = tf.add(a, b) 33 | mul = tf.multiply(a, b) 34 | 35 | # Launch the default graph. 36 | with tf.Session() as sess: 37 | # Run every operation with variable input 38 | print("Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3})) 39 | print("Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3})) 40 | 41 | 42 | # ---------------- 43 | # More in details: 44 | # Matrix Multiplication from TensorFlow official tutorial 45 | 46 | # Create a Constant op that produces a 1x2 matrix. The op is 47 | # added as a node to the default graph. 48 | # 49 | # The value returned by the constructor represents the output 50 | # of the Constant op. 51 | matrix1 = tf.constant([[3., 3.]]) 52 | 53 | # Create another Constant that produces a 2x1 matrix. 54 | matrix2 = tf.constant([[2.],[2.]]) 55 | 56 | # Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs. 57 | # The returned value, 'product', represents the result of the matrix 58 | # multiplication. 59 | product = tf.matmul(matrix1, matrix2) 60 | 61 | # To run the matmul op we call the session 'run()' method, passing 'product' 62 | # which represents the output of the matmul op. This indicates to the call 63 | # that we want to get the output of the matmul op back. 64 | # 65 | # All inputs needed by the op are run automatically by the session. They 66 | # typically are run in parallel. 67 | # 68 | # The call 'run(product)' thus causes the execution of threes ops in the 69 | # graph: the two constants and matmul. 70 | # 71 | # The output of the op is returned in 'result' as a numpy `ndarray` object. 72 | with tf.Session() as sess: 73 | result = sess.run(product) 74 | print(result) 75 | # ==> [[ 12.]] 76 | -------------------------------------------------------------------------------- /examples/1_Introduction/helloworld.py: -------------------------------------------------------------------------------- 1 | ''' 2 | HelloWorld example using TensorFlow library. 3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | ''' 7 | 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | 12 | # Simple hello world using TensorFlow 13 | 14 | # Create a Constant op 15 | # The op is added as a node to the default graph. 16 | # 17 | # The value returned by the constructor represents the output 18 | # of the Constant op. 
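# Note: under Python 3 the fetched value is a bytes object, so the print below
# shows b'Hello, TensorFlow!'; calling .decode() on the result gives a plain str.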
19 | hello = tf.constant('Hello, TensorFlow!') 20 | 21 | # Start tf session 22 | sess = tf.Session() 23 | 24 | # Run the op 25 | print(sess.run(hello)) 26 | -------------------------------------------------------------------------------- /examples/2_BasicModels/gradient_boosted_decision_tree.py: -------------------------------------------------------------------------------- 1 | """ Gradient Boosted Decision Tree (GBDT). 2 | 3 | Implement a Gradient Boosted Decision tree with TensorFlow to classify 4 | handwritten digit images. This example is using the MNIST database of 5 | handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Links: 8 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | from __future__ import print_function 15 | 16 | import tensorflow as tf 17 | from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier 18 | from tensorflow.contrib.boosted_trees.proto import learner_pb2 as gbdt_learner 19 | 20 | # Ignore all GPUs (current TF GBDT does not support GPU). 21 | import os 22 | os.environ["CUDA_VISIBLE_DEVICES"] = "" 23 | 24 | # Import MNIST data 25 | # Set verbosity to display errors only (Remove this line for showing warnings) 26 | tf.logging.set_verbosity(tf.logging.ERROR) 27 | from tensorflow.examples.tutorials.mnist import input_data 28 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False, 29 | source_url='http://yann.lecun.com/exdb/mnist/') 30 | 31 | # Parameters 32 | batch_size = 4096 # The number of samples per batch 33 | num_classes = 10 # The 10 digits 34 | num_features = 784 # Each image is 28x28 pixels 35 | max_steps = 10000 36 | 37 | # GBDT Parameters 38 | learning_rate = 0.1 39 | l1_regul = 0. 40 | l2_regul = 1. 
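# Note: when copied into the learner config below, l2_regul is divided by examples_per_layer.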
41 | examples_per_layer = 1000 42 | num_trees = 10 43 | max_depth = 16 44 | 45 | # Fill GBDT parameters into the config proto 46 | learner_config = gbdt_learner.LearnerConfig() 47 | learner_config.learning_rate_tuner.fixed.learning_rate = learning_rate 48 | learner_config.regularization.l1 = l1_regul 49 | learner_config.regularization.l2 = l2_regul / examples_per_layer 50 | learner_config.constraints.max_tree_depth = max_depth 51 | growing_mode = gbdt_learner.LearnerConfig.LAYER_BY_LAYER 52 | learner_config.growing_mode = growing_mode 53 | run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300) 54 | learner_config.multi_class_strategy = ( 55 | gbdt_learner.LearnerConfig.DIAGONAL_HESSIAN)\ 56 | 57 | # Create a TensorFlor GBDT Estimator 58 | gbdt_model = GradientBoostedDecisionTreeClassifier( 59 | model_dir=None, # No save directory specified 60 | learner_config=learner_config, 61 | n_classes=num_classes, 62 | examples_per_layer=examples_per_layer, 63 | num_trees=num_trees, 64 | center_bias=False, 65 | config=run_config) 66 | 67 | # Display TF info logs 68 | tf.logging.set_verbosity(tf.logging.INFO) 69 | 70 | # Define the input function for training 71 | input_fn = tf.estimator.inputs.numpy_input_fn( 72 | x={'images': mnist.train.images}, y=mnist.train.labels, 73 | batch_size=batch_size, num_epochs=None, shuffle=True) 74 | # Train the Model 75 | gbdt_model.fit(input_fn=input_fn, max_steps=max_steps) 76 | 77 | # Evaluate the Model 78 | # Define the input function for evaluating 79 | input_fn = tf.estimator.inputs.numpy_input_fn( 80 | x={'images': mnist.test.images}, y=mnist.test.labels, 81 | batch_size=batch_size, shuffle=False) 82 | # Use the Estimator 'evaluate' method 83 | e = gbdt_model.evaluate(input_fn=input_fn) 84 | 85 | print("Testing Accuracy:", e['accuracy']) 86 | -------------------------------------------------------------------------------- /examples/2_BasicModels/kmeans.py: -------------------------------------------------------------------------------- 1 | """ K-Means. 2 | 3 | Implement K-Means algorithm with TensorFlow, and apply it to classify 4 | handwritten digit images. This example is using the MNIST database of 5 | handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Note: This example requires TensorFlow v1.1.0 or over. 8 | 9 | Author: Aymeric Damien 10 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 11 | """ 12 | 13 | from __future__ import print_function 14 | 15 | import numpy as np 16 | import tensorflow as tf 17 | from tensorflow.contrib.factorization import KMeans 18 | 19 | # Ignore all GPUs, tf k-means does not benefit from it. 
20 | import os 21 | os.environ["CUDA_VISIBLE_DEVICES"] = "" 22 | 23 | # Import MNIST data 24 | from tensorflow.examples.tutorials.mnist import input_data 25 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 26 | full_data_x = mnist.train.images 27 | 28 | # Parameters 29 | num_steps = 50 # Total steps to train 30 | batch_size = 1024 # The number of samples per batch 31 | k = 25 # The number of clusters 32 | num_classes = 10 # The 10 digits 33 | num_features = 784 # Each image is 28x28 pixels 34 | 35 | # Input images 36 | X = tf.placeholder(tf.float32, shape=[None, num_features]) 37 | # Labels (for assigning a label to a centroid and testing) 38 | Y = tf.placeholder(tf.float32, shape=[None, num_classes]) 39 | 40 | # K-Means Parameters 41 | kmeans = KMeans(inputs=X, num_clusters=k, distance_metric='cosine', 42 | use_mini_batch=True) 43 | 44 | # Build KMeans graph 45 | training_graph = kmeans.training_graph() 46 | 47 | if len(training_graph) > 6: # Tensorflow 1.4+ 48 | (all_scores, cluster_idx, scores, cluster_centers_initialized, 49 | cluster_centers_var, init_op, train_op) = training_graph 50 | else: 51 | (all_scores, cluster_idx, scores, cluster_centers_initialized, 52 | init_op, train_op) = training_graph 53 | 54 | cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple 55 | avg_distance = tf.reduce_mean(scores) 56 | 57 | # Initialize the variables (i.e. assign their default value) 58 | init_vars = tf.global_variables_initializer() 59 | 60 | # Start TensorFlow session 61 | sess = tf.Session() 62 | 63 | # Run the initializer 64 | sess.run(init_vars, feed_dict={X: full_data_x}) 65 | sess.run(init_op, feed_dict={X: full_data_x}) 66 | 67 | # Training 68 | for i in range(1, num_steps + 1): 69 | _, d, idx = sess.run([train_op, avg_distance, cluster_idx], 70 | feed_dict={X: full_data_x}) 71 | if i % 10 == 0 or i == 1: 72 | print("Step %i, Avg Distance: %f" % (i, d)) 73 | 74 | # Assign a label to each centroid 75 | # Count total number of labels per centroid, using the label of each training 76 | # sample to their closest centroid (given by 'idx') 77 | counts = np.zeros(shape=(k, num_classes)) 78 | for i in range(len(idx)): 79 | counts[idx[i]] += mnist.train.labels[i] 80 | # Assign the most frequent label to the centroid 81 | labels_map = [np.argmax(c) for c in counts] 82 | labels_map = tf.convert_to_tensor(labels_map) 83 | 84 | # Evaluation ops 85 | # Lookup: centroid_id -> label 86 | cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx) 87 | # Compute accuracy 88 | correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32)) 89 | accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 90 | 91 | # Test Model 92 | test_x, test_y = mnist.test.images, mnist.test.labels 93 | print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y})) 94 | -------------------------------------------------------------------------------- /examples/2_BasicModels/linear_regression.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A linear regression learning algorithm example using TensorFlow library. 
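The model pred = W*X + b is fitted to a small 1-D dataset by minimizing the mean
squared error with gradient descent; the resulting line is plotted with matplotlib
against both the training and the test points.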
3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | ''' 7 | 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | import numpy 12 | import matplotlib.pyplot as plt 13 | rng = numpy.random 14 | 15 | # Parameters 16 | learning_rate = 0.01 17 | training_epochs = 1000 18 | display_step = 50 19 | 20 | # Training Data 21 | train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167, 22 | 7.042,10.791,5.313,7.997,5.654,9.27,3.1]) 23 | train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 24 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3]) 25 | n_samples = train_X.shape[0] 26 | 27 | # tf Graph Input 28 | X = tf.placeholder("float") 29 | Y = tf.placeholder("float") 30 | 31 | # Set model weights 32 | W = tf.Variable(rng.randn(), name="weight") 33 | b = tf.Variable(rng.randn(), name="bias") 34 | 35 | # Construct a linear model 36 | pred = tf.add(tf.multiply(X, W), b) 37 | 38 | # Mean squared error 39 | cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples) 40 | # Gradient descent 41 | # Note, minimize() knows to modify W and b because Variable objects are trainable=True by default 42 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 43 | 44 | # Initialize the variables (i.e. assign their default value) 45 | init = tf.global_variables_initializer() 46 | 47 | # Start training 48 | with tf.Session() as sess: 49 | 50 | # Run the initializer 51 | sess.run(init) 52 | 53 | # Fit all training data 54 | for epoch in range(training_epochs): 55 | for (x, y) in zip(train_X, train_Y): 56 | sess.run(optimizer, feed_dict={X: x, Y: y}) 57 | 58 | # Display logs per epoch step 59 | if (epoch+1) % display_step == 0: 60 | c = sess.run(cost, feed_dict={X: train_X, Y:train_Y}) 61 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \ 62 | "W=", sess.run(W), "b=", sess.run(b)) 63 | 64 | print("Optimization Finished!") 65 | training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y}) 66 | print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n') 67 | 68 | # Graphic display 69 | plt.plot(train_X, train_Y, 'ro', label='Original data') 70 | plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line') 71 | plt.legend() 72 | plt.show() 73 | 74 | # Testing example, as requested (Issue #2) 75 | test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1]) 76 | test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03]) 77 | 78 | print("Testing... (Mean square loss Comparison)") 79 | testing_cost = sess.run( 80 | tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]), 81 | feed_dict={X: test_X, Y: test_Y}) # same function as cost above 82 | print("Testing cost=", testing_cost) 83 | print("Absolute mean square loss difference:", abs( 84 | training_cost - testing_cost)) 85 | 86 | plt.plot(test_X, test_Y, 'bo', label='Testing data') 87 | plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line') 88 | plt.legend() 89 | plt.show() 90 | -------------------------------------------------------------------------------- /examples/2_BasicModels/linear_regression_eager_api.py: -------------------------------------------------------------------------------- 1 | ''' Linear Regression with Eager API. 2 | 3 | A linear regression learning algorithm example using TensorFlow's Eager API. 
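With eager execution enabled, gradients are obtained from tfe.implicit_gradients
and applied directly with the SGD optimizer, without building a graph or starting
a tf.Session.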
4 | 5 | Author: Aymeric Damien 6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 7 | ''' 8 | from __future__ import absolute_import, division, print_function 9 | 10 | import matplotlib.pyplot as plt 11 | import numpy as np 12 | import tensorflow as tf 13 | 14 | # Set Eager API 15 | tf.enable_eager_execution() 16 | tfe = tf.contrib.eager 17 | 18 | # Training Data 19 | train_X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 20 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1] 21 | train_Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 22 | 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3] 23 | n_samples = len(train_X) 24 | 25 | # Parameters 26 | learning_rate = 0.01 27 | display_step = 100 28 | num_steps = 1000 29 | 30 | # Weight and Bias 31 | W = tfe.Variable(np.random.randn()) 32 | b = tfe.Variable(np.random.randn()) 33 | 34 | 35 | # Linear regression (Wx + b) 36 | def linear_regression(inputs): 37 | return inputs * W + b 38 | 39 | 40 | # Mean square error 41 | def mean_square_fn(model_fn, inputs, labels): 42 | return tf.reduce_sum(tf.pow(model_fn(inputs) - labels, 2)) / (2 * n_samples) 43 | 44 | 45 | # SGD Optimizer 46 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 47 | # Compute gradients 48 | grad = tfe.implicit_gradients(mean_square_fn) 49 | 50 | # Initial cost, before optimizing 51 | print("Initial cost= {:.9f}".format( 52 | mean_square_fn(linear_regression, train_X, train_Y)), 53 | "W=", W.numpy(), "b=", b.numpy()) 54 | 55 | # Training 56 | for step in range(num_steps): 57 | 58 | optimizer.apply_gradients(grad(linear_regression, train_X, train_Y)) 59 | 60 | if (step + 1) % display_step == 0 or step == 0: 61 | print("Epoch:", '%04d' % (step + 1), "cost=", 62 | "{:.9f}".format(mean_square_fn(linear_regression, train_X, train_Y)), 63 | "W=", W.numpy(), "b=", b.numpy()) 64 | 65 | # Graphic display 66 | plt.plot(train_X, train_Y, 'ro', label='Original data') 67 | plt.plot(train_X, np.array(W * train_X + b), label='Fitted line') 68 | plt.legend() 69 | plt.show() 70 | -------------------------------------------------------------------------------- /examples/2_BasicModels/logistic_regression.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A logistic regression learning algorithm example using TensorFlow library. 
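The model computes softmax(x*W + b) over the 10 digit classes and is trained by
minimizing the cross-entropy loss with mini-batch gradient descent.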
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.01 20 | training_epochs = 25 21 | batch_size = 100 22 | display_step = 1 23 | 24 | # tf Graph Input 25 | x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784 26 | y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes 27 | 28 | # Set model weights 29 | W = tf.Variable(tf.zeros([784, 10])) 30 | b = tf.Variable(tf.zeros([10])) 31 | 32 | # Construct model 33 | pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax 34 | 35 | # Minimize error using cross entropy 36 | cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) 37 | # Gradient Descent 38 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 39 | 40 | # Initialize the variables (i.e. assign their default value) 41 | init = tf.global_variables_initializer() 42 | 43 | # Start training 44 | with tf.Session() as sess: 45 | 46 | # Run the initializer 47 | sess.run(init) 48 | 49 | # Training cycle 50 | for epoch in range(training_epochs): 51 | avg_cost = 0. 52 | total_batch = int(mnist.train.num_examples/batch_size) 53 | # Loop over all batches 54 | for i in range(total_batch): 55 | batch_xs, batch_ys = mnist.train.next_batch(batch_size) 56 | # Run optimization op (backprop) and cost op (to get loss value) 57 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, 58 | y: batch_ys}) 59 | # Compute average loss 60 | avg_cost += c / total_batch 61 | # Display logs per epoch step 62 | if (epoch+1) % display_step == 0: 63 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)) 64 | 65 | print("Optimization Finished!") 66 | 67 | # Test model 68 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 69 | # Calculate accuracy 70 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 71 | print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})) 72 | -------------------------------------------------------------------------------- /examples/2_BasicModels/logistic_regression_eager_api.py: -------------------------------------------------------------------------------- 1 | ''' Logistic Regression with Eager API. 2 | 3 | A logistic regression learning algorithm example using TensorFlow's Eager API. 
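Mini-batches are drawn from a tf.data.Dataset through an eager Iterator, and the
weights are updated with SGD on a sparse softmax cross-entropy loss.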
4 | This example is using the MNIST database of handwritten digits 5 | (http://yann.lecun.com/exdb/mnist/) 6 | 7 | Author: Aymeric Damien 8 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 9 | ''' 10 | from __future__ import absolute_import, division, print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Set Eager API 15 | tf.enable_eager_execution() 16 | tfe = tf.contrib.eager 17 | 18 | # Import MNIST data 19 | from tensorflow.examples.tutorials.mnist import input_data 20 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 21 | 22 | # Parameters 23 | learning_rate = 0.1 24 | batch_size = 128 25 | num_steps = 1000 26 | display_step = 100 27 | 28 | dataset = tf.data.Dataset.from_tensor_slices( 29 | (mnist.train.images, mnist.train.labels)) 30 | dataset = dataset.repeat().batch(batch_size).prefetch(batch_size) 31 | dataset_iter = tfe.Iterator(dataset) 32 | 33 | # Variables 34 | W = tfe.Variable(tf.zeros([784, 10]), name='weights') 35 | b = tfe.Variable(tf.zeros([10]), name='bias') 36 | 37 | 38 | # Logistic regression (Wx + b) 39 | def logistic_regression(inputs): 40 | return tf.matmul(inputs, W) + b 41 | 42 | 43 | # Cross-Entropy loss function 44 | def loss_fn(inference_fn, inputs, labels): 45 | # Using sparse_softmax cross entropy 46 | return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( 47 | logits=inference_fn(inputs), labels=labels)) 48 | 49 | 50 | # Calculate accuracy 51 | def accuracy_fn(inference_fn, inputs, labels): 52 | prediction = tf.nn.softmax(inference_fn(inputs)) 53 | correct_pred = tf.equal(tf.argmax(prediction, 1), labels) 54 | return tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 55 | 56 | 57 | # SGD Optimizer 58 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 59 | # Compute gradients 60 | grad = tfe.implicit_gradients(loss_fn) 61 | 62 | # Training 63 | average_loss = 0. 64 | average_acc = 0. 65 | for step in range(num_steps): 66 | 67 | # Iterate through the dataset 68 | d = dataset_iter.next() 69 | 70 | # Images 71 | x_batch = d[0] 72 | # Labels 73 | y_batch = tf.cast(d[1], dtype=tf.int64) 74 | 75 | # Compute the batch loss 76 | batch_loss = loss_fn(logistic_regression, x_batch, y_batch) 77 | average_loss += batch_loss 78 | # Compute the batch accuracy 79 | batch_accuracy = accuracy_fn(logistic_regression, x_batch, y_batch) 80 | average_acc += batch_accuracy 81 | 82 | if step == 0: 83 | # Display the initial cost, before optimizing 84 | print("Initial loss= {:.9f}".format(average_loss)) 85 | 86 | # Update the variables following gradients info 87 | optimizer.apply_gradients(grad(logistic_regression, x_batch, y_batch)) 88 | 89 | # Display info 90 | if (step + 1) % display_step == 0 or step == 0: 91 | if step > 0: 92 | average_loss /= display_step 93 | average_acc /= display_step 94 | print("Step:", '%04d' % (step + 1), " loss=", 95 | "{:.9f}".format(average_loss), " accuracy=", 96 | "{:.4f}".format(average_acc)) 97 | average_loss = 0. 98 | average_acc = 0. 99 | 100 | # Evaluate model on the test image set 101 | testX = mnist.test.images 102 | testY = mnist.test.labels 103 | 104 | test_acc = accuracy_fn(logistic_regression, testX, testY) 105 | print("Testset Accuracy: {:.4f}".format(test_acc)) 106 | -------------------------------------------------------------------------------- /examples/2_BasicModels/nearest_neighbor.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A nearest neighbor learning algorithm example using TensorFlow library. 
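Each test image is compared against 5,000 training images using the L1 distance,
and the label of the closest training sample is taken as the prediction.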
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import numpy as np 13 | import tensorflow as tf 14 | 15 | # Import MNIST data 16 | from tensorflow.examples.tutorials.mnist import input_data 17 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 18 | 19 | # In this example, we limit mnist data 20 | Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates) 21 | Xte, Yte = mnist.test.next_batch(200) #200 for testing 22 | 23 | # tf Graph Input 24 | xtr = tf.placeholder("float", [None, 784]) 25 | xte = tf.placeholder("float", [784]) 26 | 27 | # Nearest Neighbor calculation using L1 Distance 28 | # Calculate L1 Distance 29 | distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1) 30 | # Prediction: Get min distance index (Nearest neighbor) 31 | pred = tf.arg_min(distance, 0) 32 | 33 | accuracy = 0. 34 | 35 | # Initialize the variables (i.e. assign their default value) 36 | init = tf.global_variables_initializer() 37 | 38 | # Start training 39 | with tf.Session() as sess: 40 | 41 | # Run the initializer 42 | sess.run(init) 43 | 44 | # loop over test data 45 | for i in range(len(Xte)): 46 | # Get nearest neighbor 47 | nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]}) 48 | # Get nearest neighbor class label and compare it to its true label 49 | print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \ 50 | "True Class:", np.argmax(Yte[i])) 51 | # Calculate accuracy 52 | if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]): 53 | accuracy += 1./len(Xte) 54 | print("Done!") 55 | print("Accuracy:", accuracy) 56 | -------------------------------------------------------------------------------- /examples/2_BasicModels/random_forest.py: -------------------------------------------------------------------------------- 1 | """ Random Forest. 2 | 3 | Implement Random Forest algorithm with TensorFlow, and apply it to classify 4 | handwritten digit images. This example is using the MNIST database of 5 | handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Author: Aymeric Damien 8 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 9 | """ 10 | 11 | from __future__ import print_function 12 | 13 | import tensorflow as tf 14 | from tensorflow.contrib.tensor_forest.python import tensor_forest 15 | from tensorflow.python.ops import resources 16 | 17 | # Ignore all GPUs, tf random forest does not benefit from it. 
18 | import os 19 | os.environ["CUDA_VISIBLE_DEVICES"] = "" 20 | 21 | # Import MNIST data 22 | from tensorflow.examples.tutorials.mnist import input_data 23 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 24 | 25 | # Parameters 26 | num_steps = 500 # Total steps to train 27 | batch_size = 1024 # The number of samples per batch 28 | num_classes = 10 # The 10 digits 29 | num_features = 784 # Each image is 28x28 pixels 30 | num_trees = 10 31 | max_nodes = 1000 32 | 33 | # Input and Target data 34 | X = tf.placeholder(tf.float32, shape=[None, num_features]) 35 | # For random forest, labels must be integers (the class id) 36 | Y = tf.placeholder(tf.int32, shape=[None]) 37 | 38 | # Random Forest Parameters 39 | hparams = tensor_forest.ForestHParams(num_classes=num_classes, 40 | num_features=num_features, 41 | num_trees=num_trees, 42 | max_nodes=max_nodes).fill() 43 | 44 | # Build the Random Forest 45 | forest_graph = tensor_forest.RandomForestGraphs(hparams) 46 | # Get training graph and loss 47 | train_op = forest_graph.training_graph(X, Y) 48 | loss_op = forest_graph.training_loss(X, Y) 49 | 50 | # Measure the accuracy 51 | infer_op, _, _ = forest_graph.inference_graph(X) 52 | correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64)) 53 | accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 54 | 55 | # Initialize the variables (i.e. assign their default value) and forest resources 56 | init_vars = tf.group(tf.global_variables_initializer(), 57 | resources.initialize_resources(resources.shared_resources())) 58 | 59 | # Start TensorFlow session 60 | sess = tf.Session() 61 | 62 | # Run the initializer 63 | sess.run(init_vars) 64 | 65 | # Training 66 | for i in range(1, num_steps + 1): 67 | # Prepare Data 68 | # Get the next batch of MNIST data (only images are needed, not labels) 69 | batch_x, batch_y = mnist.train.next_batch(batch_size) 70 | _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y}) 71 | if i % 50 == 0 or i == 1: 72 | acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y}) 73 | print('Step %i, Loss: %f, Acc: %f' % (i, l, acc)) 74 | 75 | # Test Model 76 | test_x, test_y = mnist.test.images, mnist.test.labels 77 | print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y})) 78 | -------------------------------------------------------------------------------- /examples/3_NeuralNetworks/autoencoder.py: -------------------------------------------------------------------------------- 1 | """ Auto Encoder Example. 2 | 3 | Build a 2 layers auto-encoder with TensorFlow to compress images to a 4 | lower latent space and then reconstruct them. 5 | 6 | References: 7 | Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based 8 | learning applied to document recognition." Proceedings of the IEEE, 9 | 86(11):2278-2324, November 1998. 
10 | 11 | Links: 12 | [MNIST Dataset] http://yann.lecun.com/exdb/mnist/ 13 | 14 | Author: Aymeric Damien 15 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 16 | """ 17 | from __future__ import division, print_function, absolute_import 18 | 19 | import tensorflow as tf 20 | import numpy as np 21 | import matplotlib.pyplot as plt 22 | 23 | # Import MNIST data 24 | from tensorflow.examples.tutorials.mnist import input_data 25 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 26 | 27 | # Training Parameters 28 | learning_rate = 0.01 29 | num_steps = 30000 30 | batch_size = 256 31 | 32 | display_step = 1000 33 | examples_to_show = 10 34 | 35 | # Network Parameters 36 | num_hidden_1 = 256 # 1st layer num features 37 | num_hidden_2 = 128 # 2nd layer num features (the latent dim) 38 | num_input = 784 # MNIST data input (img shape: 28*28) 39 | 40 | # tf Graph input (only pictures) 41 | X = tf.placeholder("float", [None, num_input]) 42 | 43 | weights = { 44 | 'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1])), 45 | 'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2])), 46 | 'decoder_h1': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1])), 47 | 'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input])), 48 | } 49 | biases = { 50 | 'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1])), 51 | 'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2])), 52 | 'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1])), 53 | 'decoder_b2': tf.Variable(tf.random_normal([num_input])), 54 | } 55 | 56 | # Building the encoder 57 | def encoder(x): 58 | # Encoder Hidden layer with sigmoid activation #1 59 | layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), 60 | biases['encoder_b1'])) 61 | # Encoder Hidden layer with sigmoid activation #2 62 | layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), 63 | biases['encoder_b2'])) 64 | return layer_2 65 | 66 | 67 | # Building the decoder 68 | def decoder(x): 69 | # Decoder Hidden layer with sigmoid activation #1 70 | layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), 71 | biases['decoder_b1'])) 72 | # Decoder Hidden layer with sigmoid activation #2 73 | layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), 74 | biases['decoder_b2'])) 75 | return layer_2 76 | 77 | # Construct model 78 | encoder_op = encoder(X) 79 | decoder_op = decoder(encoder_op) 80 | 81 | # Prediction 82 | y_pred = decoder_op 83 | # Targets (Labels) are the input data. 84 | y_true = X 85 | 86 | # Define loss and optimizer, minimize the squared error 87 | loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2)) 88 | optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss) 89 | 90 | # Initialize the variables (i.e. 
assign their default value) 91 | init = tf.global_variables_initializer() 92 | 93 | # Start Training 94 | # Start a new TF session 95 | with tf.Session() as sess: 96 | 97 | # Run the initializer 98 | sess.run(init) 99 | 100 | # Training 101 | for i in range(1, num_steps+1): 102 | # Prepare Data 103 | # Get the next batch of MNIST data (only images are needed, not labels) 104 | batch_x, _ = mnist.train.next_batch(batch_size) 105 | 106 | # Run optimization op (backprop) and cost op (to get loss value) 107 | _, l = sess.run([optimizer, loss], feed_dict={X: batch_x}) 108 | # Display logs per step 109 | if i % display_step == 0 or i == 1: 110 | print('Step %i: Minibatch Loss: %f' % (i, l)) 111 | 112 | # Testing 113 | # Encode and decode images from test set and visualize their reconstruction. 114 | n = 4 115 | canvas_orig = np.empty((28 * n, 28 * n)) 116 | canvas_recon = np.empty((28 * n, 28 * n)) 117 | for i in range(n): 118 | # MNIST test set 119 | batch_x, _ = mnist.test.next_batch(n) 120 | # Encode and decode the digit image 121 | g = sess.run(decoder_op, feed_dict={X: batch_x}) 122 | 123 | # Display original images 124 | for j in range(n): 125 | # Draw the original digits 126 | canvas_orig[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = \ 127 | batch_x[j].reshape([28, 28]) 128 | # Display reconstructed images 129 | for j in range(n): 130 | # Draw the reconstructed digits 131 | canvas_recon[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = \ 132 | g[j].reshape([28, 28]) 133 | 134 | print("Original Images") 135 | plt.figure(figsize=(n, n)) 136 | plt.imshow(canvas_orig, origin="upper", cmap="gray") 137 | plt.show() 138 | 139 | print("Reconstructed Images") 140 | plt.figure(figsize=(n, n)) 141 | plt.imshow(canvas_recon, origin="upper", cmap="gray") 142 | plt.show() 143 | -------------------------------------------------------------------------------- /examples/3_NeuralNetworks/bidirectional_rnn.py: -------------------------------------------------------------------------------- 1 | """ Bi-directional Recurrent Neural Network. 2 | 3 | A Bi-directional Recurrent Neural Network (LSTM) implementation example using 4 | TensorFlow library. This example is using the MNIST database of handwritten 5 | digits (http://yann.lecun.com/exdb/mnist/) 6 | 7 | Links: 8 | [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf) 9 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 10 | 11 | Author: Aymeric Damien 12 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 13 | """ 14 | 15 | from __future__ import print_function 16 | 17 | import tensorflow as tf 18 | from tensorflow.contrib import rnn 19 | import numpy as np 20 | 21 | # Import MNIST data 22 | from tensorflow.examples.tutorials.mnist import input_data 23 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 24 | 25 | ''' 26 | To classify images using a bidirectional recurrent neural network, we consider 27 | every image row as a sequence of pixels. Because MNIST image shape is 28*28px, 28 | we will then handle 28 sequences of 28 steps for every sample. 
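For example, a batch of shape (batch_size, 784) is reshaped to (batch_size, 28, 28)
and then unstacked into a list of 28 tensors of shape (batch_size, 28), one per
time step, before being fed to the forward and backward LSTM cells.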
29 | ''' 30 | 31 | # Training Parameters 32 | learning_rate = 0.001 33 | training_steps = 10000 34 | batch_size = 128 35 | display_step = 200 36 | 37 | # Network Parameters 38 | num_input = 28 # MNIST data input (img shape: 28*28) 39 | timesteps = 28 # timesteps 40 | num_hidden = 128 # hidden layer num of features 41 | num_classes = 10 # MNIST total classes (0-9 digits) 42 | 43 | # tf Graph input 44 | X = tf.placeholder("float", [None, timesteps, num_input]) 45 | Y = tf.placeholder("float", [None, num_classes]) 46 | 47 | # Define weights 48 | weights = { 49 | # Hidden layer weights => 2*n_hidden because of forward + backward cells 50 | 'out': tf.Variable(tf.random_normal([2*num_hidden, num_classes])) 51 | } 52 | biases = { 53 | 'out': tf.Variable(tf.random_normal([num_classes])) 54 | } 55 | 56 | 57 | def BiRNN(x, weights, biases): 58 | 59 | # Prepare data shape to match `rnn` function requirements 60 | # Current data input shape: (batch_size, timesteps, n_input) 61 | # Required shape: 'timesteps' tensors list of shape (batch_size, num_input) 62 | 63 | # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input) 64 | x = tf.unstack(x, timesteps, 1) 65 | 66 | # Define lstm cells with tensorflow 67 | # Forward direction cell 68 | lstm_fw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0) 69 | # Backward direction cell 70 | lstm_bw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0) 71 | 72 | # Get lstm cell output 73 | try: 74 | outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, 75 | dtype=tf.float32) 76 | except Exception: # Old TensorFlow version only returns outputs not states 77 | outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, 78 | dtype=tf.float32) 79 | 80 | # Linear activation, using rnn inner loop last output 81 | return tf.matmul(outputs[-1], weights['out']) + biases['out'] 82 | 83 | logits = BiRNN(X, weights, biases) 84 | prediction = tf.nn.softmax(logits) 85 | 86 | # Define loss and optimizer 87 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 88 | logits=logits, labels=Y)) 89 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 90 | train_op = optimizer.minimize(loss_op) 91 | 92 | # Evaluate model (with test logits, for dropout to be disabled) 93 | correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) 94 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 95 | 96 | # Initialize the variables (i.e. 
assign their default value) 97 | init = tf.global_variables_initializer() 98 | 99 | # Start training 100 | with tf.Session() as sess: 101 | 102 | # Run the initializer 103 | sess.run(init) 104 | 105 | for step in range(1, training_steps+1): 106 | batch_x, batch_y = mnist.train.next_batch(batch_size) 107 | # Reshape data to get 28 seq of 28 elements 108 | batch_x = batch_x.reshape((batch_size, timesteps, num_input)) 109 | # Run optimization op (backprop) 110 | sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) 111 | if step % display_step == 0 or step == 1: 112 | # Calculate batch loss and accuracy 113 | loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, 114 | Y: batch_y}) 115 | print("Step " + str(step) + ", Minibatch Loss= " + \ 116 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 117 | "{:.3f}".format(acc)) 118 | 119 | print("Optimization Finished!") 120 | 121 | # Calculate accuracy for 128 mnist test images 122 | test_len = 128 123 | test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input)) 124 | test_label = mnist.test.labels[:test_len] 125 | print("Testing Accuracy:", \ 126 | sess.run(accuracy, feed_dict={X: test_data, Y: test_label})) 127 | -------------------------------------------------------------------------------- /examples/3_NeuralNetworks/convolutional_network.py: -------------------------------------------------------------------------------- 1 | """ Convolutional Neural Network. 2 | 3 | Build and train a convolutional neural network with TensorFlow. 4 | This example is using the MNIST database of handwritten digits 5 | (http://yann.lecun.com/exdb/mnist/) 6 | 7 | This example is using TensorFlow layers API, see 'convolutional_network_raw' 8 | example for a raw implementation with variables. 9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | from __future__ import division, print_function, absolute_import 14 | 15 | # Import MNIST data 16 | from tensorflow.examples.tutorials.mnist import input_data 17 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 18 | 19 | import tensorflow as tf 20 | 21 | # Training Parameters 22 | learning_rate = 0.001 23 | num_steps = 2000 24 | batch_size = 128 25 | 26 | # Network Parameters 27 | num_input = 784 # MNIST data input (img shape: 28*28) 28 | num_classes = 10 # MNIST total classes (0-9 digits) 29 | dropout = 0.25 # Dropout, probability to drop a unit 30 | 31 | 32 | # Create the neural network 33 | def conv_net(x_dict, n_classes, dropout, reuse, is_training): 34 | # Define a scope for reusing the variables 35 | with tf.variable_scope('ConvNet', reuse=reuse): 36 | # TF Estimator input is a dict, in case of multiple inputs 37 | x = x_dict['images'] 38 | 39 | # MNIST data input is a 1-D vector of 784 features (28*28 pixels) 40 | # Reshape to match picture format [Height x Width x Channel] 41 | # Tensor input become 4-D: [Batch Size, Height, Width, Channel] 42 | x = tf.reshape(x, shape=[-1, 28, 28, 1]) 43 | 44 | # Convolution Layer with 32 filters and a kernel size of 5 45 | conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu) 46 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 47 | conv1 = tf.layers.max_pooling2d(conv1, 2, 2) 48 | 49 | # Convolution Layer with 64 filters and a kernel size of 3 50 | conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu) 51 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 52 | conv2 = tf.layers.max_pooling2d(conv2, 2, 2) 53 | 54 | # Flatten 
the data to a 1-D vector for the fully connected layer 55 | fc1 = tf.contrib.layers.flatten(conv2) 56 | 57 | # Fully connected layer (in tf contrib folder for now) 58 | fc1 = tf.layers.dense(fc1, 1024) 59 | # Apply Dropout (if is_training is False, dropout is not applied) 60 | fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training) 61 | 62 | # Output layer, class prediction 63 | out = tf.layers.dense(fc1, n_classes) 64 | 65 | return out 66 | 67 | 68 | # Define the model function (following TF Estimator Template) 69 | def model_fn(features, labels, mode): 70 | # Build the neural network 71 | # Because Dropout have different behavior at training and prediction time, we 72 | # need to create 2 distinct computation graphs that still share the same weights. 73 | logits_train = conv_net(features, num_classes, dropout, reuse=False, 74 | is_training=True) 75 | logits_test = conv_net(features, num_classes, dropout, reuse=True, 76 | is_training=False) 77 | 78 | # Predictions 79 | pred_classes = tf.argmax(logits_test, axis=1) 80 | pred_probas = tf.nn.softmax(logits_test) 81 | 82 | # If prediction mode, early return 83 | if mode == tf.estimator.ModeKeys.PREDICT: 84 | return tf.estimator.EstimatorSpec(mode, predictions=pred_classes) 85 | 86 | # Define loss and optimizer 87 | loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( 88 | logits=logits_train, labels=tf.cast(labels, dtype=tf.int32))) 89 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 90 | train_op = optimizer.minimize(loss_op, 91 | global_step=tf.train.get_global_step()) 92 | 93 | # Evaluate the accuracy of the model 94 | acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 95 | 96 | # TF Estimators requires to return a EstimatorSpec, that specify 97 | # the different ops for training, evaluating, ... 98 | estim_specs = tf.estimator.EstimatorSpec( 99 | mode=mode, 100 | predictions=pred_classes, 101 | loss=loss_op, 102 | train_op=train_op, 103 | eval_metric_ops={'accuracy': acc_op}) 104 | 105 | return estim_specs 106 | 107 | # Build the Estimator 108 | model = tf.estimator.Estimator(model_fn) 109 | 110 | # Define the input function for training 111 | input_fn = tf.estimator.inputs.numpy_input_fn( 112 | x={'images': mnist.train.images}, y=mnist.train.labels, 113 | batch_size=batch_size, num_epochs=None, shuffle=True) 114 | # Train the Model 115 | model.train(input_fn, steps=num_steps) 116 | 117 | # Evaluate the Model 118 | # Define the input function for evaluating 119 | input_fn = tf.estimator.inputs.numpy_input_fn( 120 | x={'images': mnist.test.images}, y=mnist.test.labels, 121 | batch_size=batch_size, shuffle=False) 122 | # Use the Estimator 'evaluate' method 123 | e = model.evaluate(input_fn) 124 | 125 | print("Testing Accuracy:", e['accuracy']) 126 | -------------------------------------------------------------------------------- /examples/3_NeuralNetworks/convolutional_network_raw.py: -------------------------------------------------------------------------------- 1 | """ Convolutional Neural Network. 2 | 3 | Build and train a convolutional neural network with TensorFlow. 
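This 'raw' version builds the network from explicit weight and bias variables: two
convolution + max-pooling blocks, a fully connected layer with dropout, and a
linear output layer, trained with the Adam optimizer.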
4 | This example is using the MNIST database of handwritten digits 5 | (http://yann.lecun.com/exdb/mnist/) 6 | 7 | Author: Aymeric Damien 8 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 9 | """ 10 | 11 | from __future__ import division, print_function, absolute_import 12 | 13 | import tensorflow as tf 14 | 15 | # Import MNIST data 16 | from tensorflow.examples.tutorials.mnist import input_data 17 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 18 | 19 | # Training Parameters 20 | learning_rate = 0.001 21 | num_steps = 200 22 | batch_size = 128 23 | display_step = 10 24 | 25 | # Network Parameters 26 | num_input = 784 # MNIST data input (img shape: 28*28) 27 | num_classes = 10 # MNIST total classes (0-9 digits) 28 | dropout = 0.75 # Dropout, probability to keep units 29 | 30 | # tf Graph input 31 | X = tf.placeholder(tf.float32, [None, num_input]) 32 | Y = tf.placeholder(tf.float32, [None, num_classes]) 33 | keep_prob = tf.placeholder(tf.float32) # dropout (keep probability) 34 | 35 | 36 | # Create some wrappers for simplicity 37 | def conv2d(x, W, b, strides=1): 38 | # Conv2D wrapper, with bias and relu activation 39 | x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') 40 | x = tf.nn.bias_add(x, b) 41 | return tf.nn.relu(x) 42 | 43 | 44 | def maxpool2d(x, k=2): 45 | # MaxPool2D wrapper 46 | return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], 47 | padding='SAME') 48 | 49 | 50 | # Create model 51 | def conv_net(x, weights, biases, dropout): 52 | # MNIST data input is a 1-D vector of 784 features (28*28 pixels) 53 | # Reshape to match picture format [Height x Width x Channel] 54 | # Tensor input become 4-D: [Batch Size, Height, Width, Channel] 55 | x = tf.reshape(x, shape=[-1, 28, 28, 1]) 56 | 57 | # Convolution Layer 58 | conv1 = conv2d(x, weights['wc1'], biases['bc1']) 59 | # Max Pooling (down-sampling) 60 | conv1 = maxpool2d(conv1, k=2) 61 | 62 | # Convolution Layer 63 | conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) 64 | # Max Pooling (down-sampling) 65 | conv2 = maxpool2d(conv2, k=2) 66 | 67 | # Fully connected layer 68 | # Reshape conv2 output to fit fully connected layer input 69 | fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]]) 70 | fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']) 71 | fc1 = tf.nn.relu(fc1) 72 | # Apply Dropout 73 | fc1 = tf.nn.dropout(fc1, dropout) 74 | 75 | # Output, class prediction 76 | out = tf.add(tf.matmul(fc1, weights['out']), biases['out']) 77 | return out 78 | 79 | # Store layers weight & bias 80 | weights = { 81 | # 5x5 conv, 1 input, 32 outputs 82 | 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), 83 | # 5x5 conv, 32 inputs, 64 outputs 84 | 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), 85 | # fully connected, 7*7*64 inputs, 1024 outputs 86 | 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), 87 | # 1024 inputs, 10 outputs (class prediction) 88 | 'out': tf.Variable(tf.random_normal([1024, num_classes])) 89 | } 90 | 91 | biases = { 92 | 'bc1': tf.Variable(tf.random_normal([32])), 93 | 'bc2': tf.Variable(tf.random_normal([64])), 94 | 'bd1': tf.Variable(tf.random_normal([1024])), 95 | 'out': tf.Variable(tf.random_normal([num_classes])) 96 | } 97 | 98 | # Construct model 99 | logits = conv_net(X, weights, biases, keep_prob) 100 | prediction = tf.nn.softmax(logits) 101 | 102 | # Define loss and optimizer 103 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 104 | logits=logits, labels=Y)) 105 | optimizer = 
tf.train.AdamOptimizer(learning_rate=learning_rate) 106 | train_op = optimizer.minimize(loss_op) 107 | 108 | 109 | # Evaluate model 110 | correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) 111 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 112 | 113 | # Initialize the variables (i.e. assign their default value) 114 | init = tf.global_variables_initializer() 115 | 116 | # Start training 117 | with tf.Session() as sess: 118 | 119 | # Run the initializer 120 | sess.run(init) 121 | 122 | for step in range(1, num_steps+1): 123 | batch_x, batch_y = mnist.train.next_batch(batch_size) 124 | # Run optimization op (backprop) 125 | sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.8}) 126 | if step % display_step == 0 or step == 1: 127 | # Calculate batch loss and accuracy 128 | loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, 129 | Y: batch_y, 130 | keep_prob: 1.0}) 131 | print("Step " + str(step) + ", Minibatch Loss= " + \ 132 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 133 | "{:.3f}".format(acc)) 134 | 135 | print("Optimization Finished!") 136 | 137 | # Calculate accuracy for 256 MNIST test images 138 | print("Testing Accuracy:", \ 139 | sess.run(accuracy, feed_dict={X: mnist.test.images[:256], 140 | Y: mnist.test.labels[:256], 141 | keep_prob: 1.0})) 142 | -------------------------------------------------------------------------------- /examples/3_NeuralNetworks/multilayer_perceptron.py: -------------------------------------------------------------------------------- 1 | """ Multilayer Perceptron. 2 | 3 | A Multilayer Perceptron (Neural Network) implementation example using 4 | TensorFlow library. This example is using the MNIST database of handwritten 5 | digits (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Links: 8 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | # ------------------------------------------------------------------ 15 | # 16 | # THIS EXAMPLE HAS BEEN RENAMED 'neural_network.py', FOR SIMPLICITY. 
17 | # 18 | # ------------------------------------------------------------------ 19 | 20 | 21 | from __future__ import print_function 22 | 23 | # Import MNIST data 24 | from tensorflow.examples.tutorials.mnist import input_data 25 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 26 | 27 | import tensorflow as tf 28 | 29 | # Parameters 30 | learning_rate = 0.001 31 | training_epochs = 15 32 | batch_size = 100 33 | display_step = 1 34 | 35 | # Network Parameters 36 | n_hidden_1 = 256 # 1st layer number of neurons 37 | n_hidden_2 = 256 # 2nd layer number of neurons 38 | n_input = 784 # MNIST data input (img shape: 28*28) 39 | n_classes = 10 # MNIST total classes (0-9 digits) 40 | 41 | # tf Graph input 42 | X = tf.placeholder("float", [None, n_input]) 43 | Y = tf.placeholder("float", [None, n_classes]) 44 | 45 | # Store layers weight & bias 46 | weights = { 47 | 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])), 48 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 49 | 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes])) 50 | } 51 | biases = { 52 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 53 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 54 | 'out': tf.Variable(tf.random_normal([n_classes])) 55 | } 56 | 57 | 58 | # Create model 59 | def multilayer_perceptron(x): 60 | # Hidden fully connected layer with 256 neurons 61 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 62 | # Hidden fully connected layer with 256 neurons 63 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 64 | # Output fully connected layer with a neuron for each class 65 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 66 | return out_layer 67 | 68 | # Construct model 69 | logits = multilayer_perceptron(X) 70 | 71 | # Define loss and optimizer 72 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 73 | logits=logits, labels=Y)) 74 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 75 | train_op = optimizer.minimize(loss_op) 76 | # Initializing the variables 77 | init = tf.global_variables_initializer() 78 | 79 | with tf.Session() as sess: 80 | sess.run(init) 81 | 82 | # Training cycle 83 | for epoch in range(training_epochs): 84 | avg_cost = 0. 85 | total_batch = int(mnist.train.num_examples/batch_size) 86 | # Loop over all batches 87 | for i in range(total_batch): 88 | batch_x, batch_y = mnist.train.next_batch(batch_size) 89 | # Run optimization op (backprop) and cost op (to get loss value) 90 | _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x, 91 | Y: batch_y}) 92 | # Compute average loss 93 | avg_cost += c / total_batch 94 | # Display logs per epoch step 95 | if epoch % display_step == 0: 96 | print("Epoch:", '%04d' % (epoch+1), "cost={:.9f}".format(avg_cost)) 97 | print("Optimization Finished!") 98 | 99 | # Test model 100 | pred = tf.nn.softmax(logits) # Apply softmax to logits 101 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1)) 102 | # Calculate accuracy 103 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 104 | print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels})) 105 | -------------------------------------------------------------------------------- /examples/3_NeuralNetworks/neural_network.py: -------------------------------------------------------------------------------- 1 | """ Neural Network. 
2 | 3 | A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron) 4 | implementation with TensorFlow. This example is using the MNIST database 5 | of handwritten digits (http://yann.lecun.com/exdb/mnist/). 6 | 7 | This example is using TensorFlow layers, see 'neural_network_raw' example for 8 | a raw implementation with variables. 9 | 10 | Links: 11 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 12 | 13 | Author: Aymeric Damien 14 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 15 | """ 16 | 17 | from __future__ import print_function 18 | 19 | # Import MNIST data 20 | from tensorflow.examples.tutorials.mnist import input_data 21 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 22 | 23 | import tensorflow as tf 24 | 25 | # Parameters 26 | learning_rate = 0.1 27 | num_steps = 1000 28 | batch_size = 128 29 | display_step = 100 30 | 31 | # Network Parameters 32 | n_hidden_1 = 256 # 1st layer number of neurons 33 | n_hidden_2 = 256 # 2nd layer number of neurons 34 | num_input = 784 # MNIST data input (img shape: 28*28) 35 | num_classes = 10 # MNIST total classes (0-9 digits) 36 | 37 | 38 | # Define the neural network 39 | def neural_net(x_dict): 40 | # TF Estimator input is a dict, in case of multiple inputs 41 | x = x_dict['images'] 42 | # Hidden fully connected layer with 256 neurons 43 | layer_1 = tf.layers.dense(x, n_hidden_1) 44 | # Hidden fully connected layer with 256 neurons 45 | layer_2 = tf.layers.dense(layer_1, n_hidden_2) 46 | # Output fully connected layer with a neuron for each class 47 | out_layer = tf.layers.dense(layer_2, num_classes) 48 | return out_layer 49 | 50 | 51 | # Define the model function (following TF Estimator Template) 52 | def model_fn(features, labels, mode): 53 | # Build the neural network 54 | logits = neural_net(features) 55 | 56 | # Predictions 57 | pred_classes = tf.argmax(logits, axis=1) 58 | pred_probas = tf.nn.softmax(logits) 59 | 60 | # If prediction mode, early return 61 | if mode == tf.estimator.ModeKeys.PREDICT: 62 | return tf.estimator.EstimatorSpec(mode, predictions=pred_classes) 63 | 64 | # Define loss and optimizer 65 | loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( 66 | logits=logits, labels=tf.cast(labels, dtype=tf.int32))) 67 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 68 | train_op = optimizer.minimize(loss_op, 69 | global_step=tf.train.get_global_step()) 70 | 71 | # Evaluate the accuracy of the model 72 | acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 73 | 74 | # TF Estimators requires to return a EstimatorSpec, that specify 75 | # the different ops for training, evaluating, ... 
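    # A single EstimatorSpec covers every mode: the Estimator reads
    # 'predictions' in PREDICT mode, 'loss' and 'train_op' in TRAIN mode, and
    # 'loss' together with 'eval_metric_ops' in EVAL mode, so this one
    # model_fn serves training, evaluation and inference.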
76 | estim_specs = tf.estimator.EstimatorSpec( 77 | mode=mode, 78 | predictions=pred_classes, 79 | loss=loss_op, 80 | train_op=train_op, 81 | eval_metric_ops={'accuracy': acc_op}) 82 | 83 | return estim_specs 84 | 85 | # Build the Estimator 86 | model = tf.estimator.Estimator(model_fn) 87 | 88 | # Define the input function for training 89 | input_fn = tf.estimator.inputs.numpy_input_fn( 90 | x={'images': mnist.train.images}, y=mnist.train.labels, 91 | batch_size=batch_size, num_epochs=None, shuffle=True) 92 | # Train the Model 93 | model.train(input_fn, steps=num_steps) 94 | 95 | # Evaluate the Model 96 | # Define the input function for evaluating 97 | input_fn = tf.estimator.inputs.numpy_input_fn( 98 | x={'images': mnist.test.images}, y=mnist.test.labels, 99 | batch_size=batch_size, shuffle=False) 100 | # Use the Estimator 'evaluate' method 101 | e = model.evaluate(input_fn) 102 | 103 | print("Testing Accuracy:", e['accuracy']) 104 | -------------------------------------------------------------------------------- /examples/3_NeuralNetworks/neural_network_eager_api.py: -------------------------------------------------------------------------------- 1 | """ Neural Network with Eager API. 2 | 3 | A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron) 4 | implementation with TensorFlow's Eager API. This example is using the MNIST database 5 | of handwritten digits (http://yann.lecun.com/exdb/mnist/). 6 | 7 | This example is using TensorFlow layers, see 'neural_network_raw' example for 8 | a raw implementation with variables. 9 | 10 | Links: 11 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 12 | 13 | Author: Aymeric Damien 14 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 15 | """ 16 | from __future__ import print_function 17 | 18 | import tensorflow as tf 19 | 20 | # Set Eager API 21 | tf.enable_eager_execution() 22 | tfe = tf.contrib.eager 23 | 24 | # Import MNIST data 25 | from tensorflow.examples.tutorials.mnist import input_data 26 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 27 | 28 | # Parameters 29 | learning_rate = 0.001 30 | num_steps = 1000 31 | batch_size = 128 32 | display_step = 100 33 | 34 | # Network Parameters 35 | n_hidden_1 = 256 # 1st layer number of neurons 36 | n_hidden_2 = 256 # 2nd layer number of neurons 37 | num_input = 784 # MNIST data input (img shape: 28*28) 38 | num_classes = 10 # MNIST total classes (0-9 digits) 39 | 40 | # Using TF Dataset to split data into batches 41 | dataset = tf.data.Dataset.from_tensor_slices( 42 | (mnist.train.images, mnist.train.labels)) 43 | dataset = dataset.repeat().batch(batch_size).prefetch(batch_size) 44 | dataset_iter = tfe.Iterator(dataset) 45 | 46 | 47 | # Define the neural network. 
To use eager API and tf.layers API together, 48 | # we must instantiate a tfe.Network class as follow: 49 | class NeuralNet(tfe.Network): 50 | def __init__(self): 51 | # Define each layer 52 | super(NeuralNet, self).__init__() 53 | # Hidden fully connected layer with 256 neurons 54 | self.layer1 = self.track_layer( 55 | tf.layers.Dense(n_hidden_1, activation=tf.nn.relu)) 56 | # Hidden fully connected layer with 256 neurons 57 | self.layer2 = self.track_layer( 58 | tf.layers.Dense(n_hidden_2, activation=tf.nn.relu)) 59 | # Output fully connected layer with a neuron for each class 60 | self.out_layer = self.track_layer(tf.layers.Dense(num_classes)) 61 | 62 | def call(self, x): 63 | x = self.layer1(x) 64 | x = self.layer2(x) 65 | return self.out_layer(x) 66 | 67 | 68 | neural_net = NeuralNet() 69 | 70 | 71 | # Cross-Entropy loss function 72 | def loss_fn(inference_fn, inputs, labels): 73 | # Using sparse_softmax cross entropy 74 | return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( 75 | logits=inference_fn(inputs), labels=labels)) 76 | 77 | 78 | # Calculate accuracy 79 | def accuracy_fn(inference_fn, inputs, labels): 80 | prediction = tf.nn.softmax(inference_fn(inputs)) 81 | correct_pred = tf.equal(tf.argmax(prediction, 1), labels) 82 | return tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 83 | 84 | 85 | # SGD Optimizer 86 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 87 | # Compute gradients 88 | grad = tfe.implicit_gradients(loss_fn) 89 | 90 | # Training 91 | average_loss = 0. 92 | average_acc = 0. 93 | for step in range(num_steps): 94 | 95 | # Iterate through the dataset 96 | d = dataset_iter.next() 97 | 98 | # Images 99 | x_batch = d[0] 100 | # Labels 101 | y_batch = tf.cast(d[1], dtype=tf.int64) 102 | 103 | # Compute the batch loss 104 | batch_loss = loss_fn(neural_net, x_batch, y_batch) 105 | average_loss += batch_loss 106 | # Compute the batch accuracy 107 | batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch) 108 | average_acc += batch_accuracy 109 | 110 | if step == 0: 111 | # Display the initial cost, before optimizing 112 | print("Initial loss= {:.9f}".format(average_loss)) 113 | 114 | # Update the variables following gradients info 115 | optimizer.apply_gradients(grad(neural_net, x_batch, y_batch)) 116 | 117 | # Display info 118 | if (step + 1) % display_step == 0 or step == 0: 119 | if step > 0: 120 | average_loss /= display_step 121 | average_acc /= display_step 122 | print("Step:", '%04d' % (step + 1), " loss=", 123 | "{:.9f}".format(average_loss), " accuracy=", 124 | "{:.4f}".format(average_acc)) 125 | average_loss = 0. 126 | average_acc = 0. 127 | 128 | # Evaluate model on the test image set 129 | testX = mnist.test.images 130 | testY = mnist.test.labels 131 | 132 | test_acc = accuracy_fn(neural_net, testX, testY) 133 | print("Testset Accuracy: {:.4f}".format(test_acc)) 134 | -------------------------------------------------------------------------------- /examples/3_NeuralNetworks/neural_network_raw.py: -------------------------------------------------------------------------------- 1 | """ Neural Network. 2 | 3 | A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron) 4 | implementation with TensorFlow. This example is using the MNIST database 5 | of handwritten digits (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Links: 8 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 
9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | from __future__ import print_function 15 | 16 | # Import MNIST data 17 | from tensorflow.examples.tutorials.mnist import input_data 18 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 19 | 20 | import tensorflow as tf 21 | 22 | # Parameters 23 | learning_rate = 0.1 24 | num_steps = 500 25 | batch_size = 128 26 | display_step = 100 27 | 28 | # Network Parameters 29 | n_hidden_1 = 256 # 1st layer number of neurons 30 | n_hidden_2 = 256 # 2nd layer number of neurons 31 | num_input = 784 # MNIST data input (img shape: 28*28) 32 | num_classes = 10 # MNIST total classes (0-9 digits) 33 | 34 | # tf Graph input 35 | X = tf.placeholder("float", [None, num_input]) 36 | Y = tf.placeholder("float", [None, num_classes]) 37 | 38 | # Store layers weight & bias 39 | weights = { 40 | 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])), 41 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 42 | 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes])) 43 | } 44 | biases = { 45 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 46 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 47 | 'out': tf.Variable(tf.random_normal([num_classes])) 48 | } 49 | 50 | 51 | # Create model 52 | def neural_net(x): 53 | # Hidden fully connected layer with 256 neurons 54 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 55 | # Hidden fully connected layer with 256 neurons 56 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 57 | # Output fully connected layer with a neuron for each class 58 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 59 | return out_layer 60 | 61 | # Construct model 62 | logits = neural_net(X) 63 | prediction = tf.nn.softmax(logits) 64 | 65 | # Define loss and optimizer 66 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 67 | logits=logits, labels=Y)) 68 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 69 | train_op = optimizer.minimize(loss_op) 70 | 71 | # Evaluate model 72 | correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) 73 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 74 | 75 | # Initialize the variables (i.e. assign their default value) 76 | init = tf.global_variables_initializer() 77 | 78 | # Start training 79 | with tf.Session() as sess: 80 | 81 | # Run the initializer 82 | sess.run(init) 83 | 84 | for step in range(1, num_steps+1): 85 | batch_x, batch_y = mnist.train.next_batch(batch_size) 86 | # Run optimization op (backprop) 87 | sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) 88 | if step % display_step == 0 or step == 1: 89 | # Calculate batch loss and accuracy 90 | loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, 91 | Y: batch_y}) 92 | print("Step " + str(step) + ", Minibatch Loss= " + \ 93 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 94 | "{:.3f}".format(acc)) 95 | 96 | print("Optimization Finished!") 97 | 98 | # Calculate accuracy for MNIST test images 99 | print("Testing Accuracy:", \ 100 | sess.run(accuracy, feed_dict={X: mnist.test.images, 101 | Y: mnist.test.labels})) 102 | -------------------------------------------------------------------------------- /examples/3_NeuralNetworks/recurrent_network.py: -------------------------------------------------------------------------------- 1 | """ Recurrent Neural Network. 
2 | 3 | A Recurrent Neural Network (LSTM) implementation example using TensorFlow library. 4 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Links: 7 | [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf) 8 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | from __future__ import print_function 15 | 16 | import tensorflow as tf 17 | from tensorflow.contrib import rnn 18 | 19 | # Import MNIST data 20 | from tensorflow.examples.tutorials.mnist import input_data 21 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 22 | 23 | ''' 24 | To classify images using a recurrent neural network, we consider every image 25 | row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then 26 | handle 28 sequences of 28 steps for every sample. 27 | ''' 28 | 29 | # Training Parameters 30 | learning_rate = 0.001 31 | training_steps = 10000 32 | batch_size = 128 33 | display_step = 200 34 | 35 | # Network Parameters 36 | num_input = 28 # MNIST data input (img shape: 28*28) 37 | timesteps = 28 # timesteps 38 | num_hidden = 128 # hidden layer num of features 39 | num_classes = 10 # MNIST total classes (0-9 digits) 40 | 41 | # tf Graph input 42 | X = tf.placeholder("float", [None, timesteps, num_input]) 43 | Y = tf.placeholder("float", [None, num_classes]) 44 | 45 | # Define weights 46 | weights = { 47 | 'out': tf.Variable(tf.random_normal([num_hidden, num_classes])) 48 | } 49 | biases = { 50 | 'out': tf.Variable(tf.random_normal([num_classes])) 51 | } 52 | 53 | 54 | def RNN(x, weights, biases): 55 | 56 | # Prepare data shape to match `rnn` function requirements 57 | # Current data input shape: (batch_size, timesteps, n_input) 58 | # Required shape: 'timesteps' tensors list of shape (batch_size, n_input) 59 | 60 | # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input) 61 | x = tf.unstack(x, timesteps, 1) 62 | 63 | # Define a lstm cell with tensorflow 64 | lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0) 65 | 66 | # Get lstm cell output 67 | outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32) 68 | 69 | # Linear activation, using rnn inner loop last output 70 | return tf.matmul(outputs[-1], weights['out']) + biases['out'] 71 | 72 | logits = RNN(X, weights, biases) 73 | prediction = tf.nn.softmax(logits) 74 | 75 | # Define loss and optimizer 76 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 77 | logits=logits, labels=Y)) 78 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 79 | train_op = optimizer.minimize(loss_op) 80 | 81 | # Evaluate model (with test logits, for dropout to be disabled) 82 | correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) 83 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 84 | 85 | # Initialize the variables (i.e. 
assign their default value) 86 | init = tf.global_variables_initializer() 87 | 88 | # Start training 89 | with tf.Session() as sess: 90 | 91 | # Run the initializer 92 | sess.run(init) 93 | 94 | for step in range(1, training_steps+1): 95 | batch_x, batch_y = mnist.train.next_batch(batch_size) 96 | # Reshape data to get 28 seq of 28 elements 97 | batch_x = batch_x.reshape((batch_size, timesteps, num_input)) 98 | # Run optimization op (backprop) 99 | sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) 100 | if step % display_step == 0 or step == 1: 101 | # Calculate batch loss and accuracy 102 | loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, 103 | Y: batch_y}) 104 | print("Step " + str(step) + ", Minibatch Loss= " + \ 105 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 106 | "{:.3f}".format(acc)) 107 | 108 | print("Optimization Finished!") 109 | 110 | # Calculate accuracy for 128 mnist test images 111 | test_len = 128 112 | test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input)) 113 | test_label = mnist.test.labels[:test_len] 114 | print("Testing Accuracy:", \ 115 | sess.run(accuracy, feed_dict={X: test_data, Y: test_label})) 116 | -------------------------------------------------------------------------------- /examples/4_Utils/save_restore_model.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Save and Restore a model using TensorFlow. 3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | # Import MNIST data 13 | from tensorflow.examples.tutorials.mnist import input_data 14 | mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 15 | 16 | import tensorflow as tf 17 | 18 | # Parameters 19 | learning_rate = 0.001 20 | batch_size = 100 21 | display_step = 1 22 | model_path = "/tmp/model.ckpt" 23 | 24 | # Network Parameters 25 | n_hidden_1 = 256 # 1st layer number of features 26 | n_hidden_2 = 256 # 2nd layer number of features 27 | n_input = 784 # MNIST data input (img shape: 28*28) 28 | n_classes = 10 # MNIST total classes (0-9 digits) 29 | 30 | # tf Graph input 31 | x = tf.placeholder("float", [None, n_input]) 32 | y = tf.placeholder("float", [None, n_classes]) 33 | 34 | 35 | # Create model 36 | def multilayer_perceptron(x, weights, biases): 37 | # Hidden layer with RELU activation 38 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 39 | layer_1 = tf.nn.relu(layer_1) 40 | # Hidden layer with RELU activation 41 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 42 | layer_2 = tf.nn.relu(layer_2) 43 | # Output layer with linear activation 44 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 45 | return out_layer 46 | 47 | # Store layers weight & bias 48 | weights = { 49 | 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])), 50 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 51 | 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes])) 52 | } 53 | biases = { 54 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 55 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 56 | 'out': tf.Variable(tf.random_normal([n_classes])) 57 | } 58 | 59 | # Construct model 60 | pred = multilayer_perceptron(x, weights, biases) 61 | 62 | # Define loss and optimizer 63 | cost = 
tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) 64 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) 65 | 66 | # Initialize the variables (i.e. assign their default value) 67 | init = tf.global_variables_initializer() 68 | 69 | # 'Saver' op to save and restore all the variables 70 | saver = tf.train.Saver() 71 | 72 | # Running first session 73 | print("Starting 1st session...") 74 | with tf.Session() as sess: 75 | 76 | # Run the initializer 77 | sess.run(init) 78 | 79 | # Training cycle 80 | for epoch in range(3): 81 | avg_cost = 0. 82 | total_batch = int(mnist.train.num_examples/batch_size) 83 | # Loop over all batches 84 | for i in range(total_batch): 85 | batch_x, batch_y = mnist.train.next_batch(batch_size) 86 | # Run optimization op (backprop) and cost op (to get loss value) 87 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, 88 | y: batch_y}) 89 | # Compute average loss 90 | avg_cost += c / total_batch 91 | # Display logs per epoch step 92 | if epoch % display_step == 0: 93 | print("Epoch:", '%04d' % (epoch+1), "cost=", \ 94 | "{:.9f}".format(avg_cost)) 95 | print("First Optimization Finished!") 96 | 97 | # Test model 98 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 99 | # Calculate accuracy 100 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 101 | print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})) 102 | 103 | # Save model weights to disk 104 | save_path = saver.save(sess, model_path) 105 | print("Model saved in file: %s" % save_path) 106 | 107 | # Running a new session 108 | print("Starting 2nd session...") 109 | with tf.Session() as sess: 110 | # Initialize variables 111 | sess.run(init) 112 | 113 | # Restore model weights from previously saved model 114 | saver.restore(sess, model_path) 115 | print("Model restored from file: %s" % save_path) 116 | 117 | # Resume training 118 | for epoch in range(7): 119 | avg_cost = 0. 120 | total_batch = int(mnist.train.num_examples / batch_size) 121 | # Loop over all batches 122 | for i in range(total_batch): 123 | batch_x, batch_y = mnist.train.next_batch(batch_size) 124 | # Run optimization op (backprop) and cost op (to get loss value) 125 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, 126 | y: batch_y}) 127 | # Compute average loss 128 | avg_cost += c / total_batch 129 | # Display logs per epoch step 130 | if epoch % display_step == 0: 131 | print("Epoch:", '%04d' % (epoch + 1), "cost=", \ 132 | "{:.9f}".format(avg_cost)) 133 | print("Second Optimization Finished!") 134 | 135 | # Test model 136 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 137 | # Calculate accuracy 138 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 139 | print("Accuracy:", accuracy.eval( 140 | {x: mnist.test.images, y: mnist.test.labels})) 141 | -------------------------------------------------------------------------------- /examples/4_Utils/tensorboard_advanced.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Graph and Loss visualization using Tensorboard. 
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.01 20 | training_epochs = 25 21 | batch_size = 100 22 | display_step = 1 23 | logs_path = '/tmp/tensorflow_logs/example/' 24 | 25 | # Network Parameters 26 | n_hidden_1 = 256 # 1st layer number of features 27 | n_hidden_2 = 256 # 2nd layer number of features 28 | n_input = 784 # MNIST data input (img shape: 28*28) 29 | n_classes = 10 # MNIST total classes (0-9 digits) 30 | 31 | # tf Graph Input 32 | # mnist data image of shape 28*28=784 33 | x = tf.placeholder(tf.float32, [None, 784], name='InputData') 34 | # 0-9 digits recognition => 10 classes 35 | y = tf.placeholder(tf.float32, [None, 10], name='LabelData') 36 | 37 | 38 | # Create model 39 | def multilayer_perceptron(x, weights, biases): 40 | # Hidden layer with RELU activation 41 | layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1']) 42 | layer_1 = tf.nn.relu(layer_1) 43 | # Create a summary to visualize the first layer ReLU activation 44 | tf.summary.histogram("relu1", layer_1) 45 | # Hidden layer with RELU activation 46 | layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2']) 47 | layer_2 = tf.nn.relu(layer_2) 48 | # Create another summary to visualize the second layer ReLU activation 49 | tf.summary.histogram("relu2", layer_2) 50 | # Output layer 51 | out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3']) 52 | return out_layer 53 | 54 | # Store layers weight & bias 55 | weights = { 56 | 'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'), 57 | 'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'), 58 | 'w3': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W3') 59 | } 60 | biases = { 61 | 'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'), 62 | 'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'), 63 | 'b3': tf.Variable(tf.random_normal([n_classes]), name='b3') 64 | } 65 | 66 | # Encapsulating all ops into scopes, making Tensorboard's Graph 67 | # Visualization more convenient 68 | with tf.name_scope('Model'): 69 | # Build model 70 | pred = multilayer_perceptron(x, weights, biases) 71 | 72 | with tf.name_scope('Loss'): 73 | # Softmax Cross entropy (cost function) 74 | loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) 75 | 76 | with tf.name_scope('SGD'): 77 | # Gradient Descent 78 | optimizer = tf.train.GradientDescentOptimizer(learning_rate) 79 | # Op to calculate every variable gradient 80 | grads = tf.gradients(loss, tf.trainable_variables()) 81 | grads = list(zip(grads, tf.trainable_variables())) 82 | # Op to update all variables according to their gradient 83 | apply_grads = optimizer.apply_gradients(grads_and_vars=grads) 84 | 85 | with tf.name_scope('Accuracy'): 86 | # Accuracy 87 | acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 88 | acc = tf.reduce_mean(tf.cast(acc, tf.float32)) 89 | 90 | # Initialize the variables (i.e. 
assign their default value) 91 | init = tf.global_variables_initializer() 92 | 93 | # Create a summary to monitor cost tensor 94 | tf.summary.scalar("loss", loss) 95 | # Create a summary to monitor accuracy tensor 96 | tf.summary.scalar("accuracy", acc) 97 | # Create summaries to visualize weights 98 | for var in tf.trainable_variables(): 99 | tf.summary.histogram(var.name, var) 100 | # Summarize all gradients 101 | for grad, var in grads: 102 | tf.summary.histogram(var.name + '/gradient', grad) 103 | # Merge all summaries into a single op 104 | merged_summary_op = tf.summary.merge_all() 105 | 106 | # Start training 107 | with tf.Session() as sess: 108 | 109 | # Run the initializer 110 | sess.run(init) 111 | 112 | # op to write logs to Tensorboard 113 | summary_writer = tf.summary.FileWriter(logs_path, 114 | graph=tf.get_default_graph()) 115 | 116 | # Training cycle 117 | for epoch in range(training_epochs): 118 | avg_cost = 0. 119 | total_batch = int(mnist.train.num_examples/batch_size) 120 | # Loop over all batches 121 | for i in range(total_batch): 122 | batch_xs, batch_ys = mnist.train.next_batch(batch_size) 123 | # Run optimization op (backprop), cost op (to get loss value) 124 | # and summary nodes 125 | _, c, summary = sess.run([apply_grads, loss, merged_summary_op], 126 | feed_dict={x: batch_xs, y: batch_ys}) 127 | # Write logs at every iteration 128 | summary_writer.add_summary(summary, epoch * total_batch + i) 129 | # Compute average loss 130 | avg_cost += c / total_batch 131 | # Display logs per epoch step 132 | if (epoch+1) % display_step == 0: 133 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)) 134 | 135 | print("Optimization Finished!") 136 | 137 | # Test model 138 | # Calculate accuracy 139 | print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels})) 140 | 141 | print("Run the command line:\n" \ 142 | "--> tensorboard --logdir=/tmp/tensorflow_logs " \ 143 | "\nThen open http://0.0.0.0:6006/ into your web browser") 144 | -------------------------------------------------------------------------------- /examples/4_Utils/tensorboard_basic.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Graph and Loss visualization using Tensorboard. 
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.01 20 | training_epochs = 25 21 | batch_size = 100 22 | display_epoch = 1 23 | logs_path = '/tmp/tensorflow_logs/example/' 24 | 25 | # tf Graph Input 26 | # mnist data image of shape 28*28=784 27 | x = tf.placeholder(tf.float32, [None, 784], name='InputData') 28 | # 0-9 digits recognition => 10 classes 29 | y = tf.placeholder(tf.float32, [None, 10], name='LabelData') 30 | 31 | # Set model weights 32 | W = tf.Variable(tf.zeros([784, 10]), name='Weights') 33 | b = tf.Variable(tf.zeros([10]), name='Bias') 34 | 35 | # Construct model and encapsulating all ops into scopes, making 36 | # Tensorboard's Graph visualization more convenient 37 | with tf.name_scope('Model'): 38 | # Model 39 | pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax 40 | with tf.name_scope('Loss'): 41 | # Minimize error using cross entropy 42 | cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) 43 | with tf.name_scope('SGD'): 44 | # Gradient Descent 45 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 46 | with tf.name_scope('Accuracy'): 47 | # Accuracy 48 | acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 49 | acc = tf.reduce_mean(tf.cast(acc, tf.float32)) 50 | 51 | # Initialize the variables (i.e. assign their default value) 52 | init = tf.global_variables_initializer() 53 | 54 | # Create a summary to monitor cost tensor 55 | tf.summary.scalar("loss", cost) 56 | # Create a summary to monitor accuracy tensor 57 | tf.summary.scalar("accuracy", acc) 58 | # Merge all summaries into a single op 59 | merged_summary_op = tf.summary.merge_all() 60 | 61 | # Start training 62 | with tf.Session() as sess: 63 | 64 | # Run the initializer 65 | sess.run(init) 66 | 67 | # op to write logs to Tensorboard 68 | summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph()) 69 | 70 | # Training cycle 71 | for epoch in range(training_epochs): 72 | avg_cost = 0. 
73 | total_batch = int(mnist.train.num_examples/batch_size) 74 | # Loop over all batches 75 | for i in range(total_batch): 76 | batch_xs, batch_ys = mnist.train.next_batch(batch_size) 77 | # Run optimization op (backprop), cost op (to get loss value) 78 | # and summary nodes 79 | _, c, summary = sess.run([optimizer, cost, merged_summary_op], 80 | feed_dict={x: batch_xs, y: batch_ys}) 81 | # Write logs at every iteration 82 | summary_writer.add_summary(summary, epoch * total_batch + i) 83 | # Compute average loss 84 | avg_cost += c / total_batch 85 | # Display logs per epoch step 86 | if (epoch+1) % display_epoch == 0: 87 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)) 88 | 89 | print("Optimization Finished!") 90 | 91 | # Test model 92 | # Calculate accuracy 93 | print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels})) 94 | 95 | print("Run the command line:\n" \ 96 | "--> tensorboard --logdir=/tmp/tensorflow_logs " \ 97 | "\nThen open http://0.0.0.0:6006/ into your web browser") 98 | -------------------------------------------------------------------------------- /examples/5_DataManagement/tensorflow_dataset_api.py: -------------------------------------------------------------------------------- 1 | """ TensorFlow Dataset API. 2 | 3 | In this example, we will show how to load numpy array data into the new 4 | TensorFlow 'Dataset' API. The Dataset API implements an optimized data pipeline 5 | with queues, that make data processing and training faster (especially on GPU). 6 | 7 | Author: Aymeric Damien 8 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 9 | """ 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data (Numpy format) 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.001 20 | num_steps = 2000 21 | batch_size = 128 22 | display_step = 100 23 | 24 | # Network Parameters 25 | n_input = 784 # MNIST data input (img shape: 28*28) 26 | n_classes = 10 # MNIST total classes (0-9 digits) 27 | dropout = 0.75 # Dropout, probability to keep units 28 | 29 | sess = tf.Session() 30 | 31 | # Create a dataset tensor from the images and the labels 32 | dataset = tf.data.Dataset.from_tensor_slices( 33 | (mnist.train.images, mnist.train.labels)) 34 | # Automatically refill the data queue when empty 35 | dataset = dataset.repeat() 36 | # Create batches of data 37 | dataset = dataset.batch(batch_size) 38 | # Prefetch data for faster consumption 39 | dataset = dataset.prefetch(batch_size) 40 | 41 | # Create an iterator over the dataset 42 | iterator = dataset.make_initializable_iterator() 43 | # Initialize the iterator 44 | sess.run(iterator.initializer) 45 | 46 | # Neural Net Input (images, labels) 47 | X, Y = iterator.get_next() 48 | 49 | 50 | # ----------------------------------------------- 51 | # THIS IS A CLASSIC CNN (see examples, section 3) 52 | # ----------------------------------------------- 53 | # Note that a few elements have changed (usage of sess run). 
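# Unlike the placeholder-based examples above, X and Y are tensors produced
# directly by the Dataset iterator, so the training loop further down calls
# sess.run(train_op) with no feed_dict: every run of the graph automatically
# pulls the next batch from the input pipeline.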
54 | 55 | # Create model 56 | def conv_net(x, n_classes, dropout, reuse, is_training): 57 | # Define a scope for reusing the variables 58 | with tf.variable_scope('ConvNet', reuse=reuse): 59 | # MNIST data input is a 1-D vector of 784 features (28*28 pixels) 60 | # Reshape to match picture format [Height x Width x Channel] 61 | # Tensor input become 4-D: [Batch Size, Height, Width, Channel] 62 | x = tf.reshape(x, shape=[-1, 28, 28, 1]) 63 | 64 | # Convolution Layer with 32 filters and a kernel size of 5 65 | conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu) 66 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 67 | conv1 = tf.layers.max_pooling2d(conv1, 2, 2) 68 | 69 | # Convolution Layer with 32 filters and a kernel size of 5 70 | conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu) 71 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 72 | conv2 = tf.layers.max_pooling2d(conv2, 2, 2) 73 | 74 | # Flatten the data to a 1-D vector for the fully connected layer 75 | fc1 = tf.contrib.layers.flatten(conv2) 76 | 77 | # Fully connected layer (in contrib folder for now) 78 | fc1 = tf.layers.dense(fc1, 1024) 79 | # Apply Dropout (if is_training is False, dropout is not applied) 80 | fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training) 81 | 82 | # Output layer, class prediction 83 | out = tf.layers.dense(fc1, n_classes) 84 | # Because 'softmax_cross_entropy_with_logits' already apply softmax, 85 | # we only apply softmax to testing network 86 | out = tf.nn.softmax(out) if not is_training else out 87 | 88 | return out 89 | 90 | 91 | # Because Dropout have different behavior at training and prediction time, we 92 | # need to create 2 distinct computation graphs that share the same weights. 93 | 94 | # Create a graph for training 95 | logits_train = conv_net(X, n_classes, dropout, reuse=False, is_training=True) 96 | # Create another graph for testing that reuse the same weights, but has 97 | # different behavior for 'dropout' (not applied). 98 | logits_test = conv_net(X, n_classes, dropout, reuse=True, is_training=False) 99 | 100 | # Define loss and optimizer (with train logits, for dropout to take effect) 101 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 102 | logits=logits_train, labels=Y)) 103 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 104 | train_op = optimizer.minimize(loss_op) 105 | 106 | # Evaluate model (with test logits, for dropout to be disabled) 107 | correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(Y, 1)) 108 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 109 | 110 | # Initialize the variables (i.e. 
assign their default value) 111 | init = tf.global_variables_initializer() 112 | 113 | # Run the initializer 114 | sess.run(init) 115 | 116 | # Training cycle 117 | for step in range(1, num_steps + 1): 118 | 119 | # Run optimization 120 | sess.run(train_op) 121 | 122 | if step % display_step == 0 or step == 1: 123 | # Calculate batch loss and accuracy 124 | # (note that this consume a new batch of data) 125 | loss, acc = sess.run([loss_op, accuracy]) 126 | print("Step " + str(step) + ", Minibatch Loss= " + \ 127 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 128 | "{:.3f}".format(acc)) 129 | 130 | print("Optimization Finished!") 131 | -------------------------------------------------------------------------------- /examples/6_MultiGPU/multigpu_basics.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | ''' 3 | Basic Multi GPU computation example using TensorFlow library. 4 | 5 | Author: Aymeric Damien 6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 7 | ''' 8 | 9 | ''' 10 | This tutorial requires your machine to have 2 GPUs 11 | "/cpu:0": The CPU of your machine. 12 | "/gpu:0": The first GPU of your machine 13 | "/gpu:1": The second GPU of your machine 14 | ''' 15 | 16 | 17 | 18 | import numpy as np 19 | import tensorflow as tf 20 | import datetime 21 | 22 | # Processing Units logs 23 | log_device_placement = True 24 | 25 | # Num of multiplications to perform 26 | n = 10 27 | 28 | ''' 29 | Example: compute A^n + B^n on 2 GPUs 30 | Results on 8 cores with 2 GTX-980: 31 | * Single GPU computation time: 0:00:11.277449 32 | * Multi GPU computation time: 0:00:07.131701 33 | ''' 34 | # Create random large matrix 35 | A = np.random.rand(10000, 10000).astype('float32') 36 | B = np.random.rand(10000, 10000).astype('float32') 37 | 38 | # Create a graph to store results 39 | c1 = [] 40 | c2 = [] 41 | 42 | def matpow(M, n): 43 | if n < 1: #Abstract cases where n < 1 44 | return M 45 | else: 46 | return tf.matmul(M, matpow(M, n-1)) 47 | 48 | ''' 49 | Single GPU computing 50 | ''' 51 | with tf.device('/gpu:0'): 52 | a = tf.placeholder(tf.float32, [10000, 10000]) 53 | b = tf.placeholder(tf.float32, [10000, 10000]) 54 | # Compute A^n and B^n and store results in c1 55 | c1.append(matpow(a, n)) 56 | c1.append(matpow(b, n)) 57 | 58 | with tf.device('/cpu:0'): 59 | sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n 60 | 61 | t1_1 = datetime.datetime.now() 62 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess: 63 | # Run the op. 64 | sess.run(sum, {a:A, b:B}) 65 | t2_1 = datetime.datetime.now() 66 | 67 | 68 | ''' 69 | Multi GPU computing 70 | ''' 71 | # GPU:0 computes A^n 72 | with tf.device('/gpu:0'): 73 | # Compute A^n and store result in c2 74 | a = tf.placeholder(tf.float32, [10000, 10000]) 75 | c2.append(matpow(a, n)) 76 | 77 | # GPU:1 computes B^n 78 | with tf.device('/gpu:1'): 79 | # Compute B^n and store result in c2 80 | b = tf.placeholder(tf.float32, [10000, 10000]) 81 | c2.append(matpow(b, n)) 82 | 83 | with tf.device('/cpu:0'): 84 | sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n 85 | 86 | t1_2 = datetime.datetime.now() 87 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess: 88 | # Run the op. 
89 | sess.run(sum, {a:A, b:B}) 90 | t2_2 = datetime.datetime.now() 91 | 92 | 93 | print("Single GPU computation time: " + str(t2_1-t1_1)) 94 | print("Multi GPU computation time: " + str(t2_2-t1_2)) 95 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | ## Deprecated - Please Read 2 | 3 | Due to TensorFlow radically changing their API in v2, the examples index have been split between [v1](../tensorflow_v1) and [v2](../tensorflow_v2). 4 | 5 | The following examples are the original TF v1 examples, and will be deprecated entirely in favor of [tensorflow_v1](../tensorflow_v1) directory in a future release. 6 | -------------------------------------------------------------------------------- /notebooks/0_Prerequisite/ml_introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Machine Learning\n", 8 | "\n", 9 | "Prior to start browsing the examples, it may be useful that you get familiar with machine learning, as TensorFlow is mostly used for machine learning tasks (especially Neural Networks). You can find below a list of useful links, that can give you the basic knowledge required for this TensorFlow Tutorial.\n", 10 | "\n", 11 | "## Machine Learning\n", 12 | "\n", 13 | "- [An Introduction to Machine Learning Theory and Its Applications: A Visual Tutorial with Examples](https://www.toptal.com/machine-learning/machine-learning-theory-an-introductory-primer)\n", 14 | "- [A Gentle Guide to Machine Learning](https://blog.monkeylearn.com/a-gentle-guide-to-machine-learning/)\n", 15 | "- [A Visual Introduction to Machine Learning](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/)\n", 16 | "- [Introduction to Machine Learning](http://alex.smola.org/drafts/thebook.pdf)\n", 17 | "\n", 18 | "## Deep Learning & Neural Networks\n", 19 | "\n", 20 | "- [An Introduction to Neural Networks](http://www.cs.stir.ac.uk/~lss/NNIntro/InvSlides.html)\n", 21 | "- [An Introduction to Image Recognition with Deep Learning](https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721)\n", 22 | "- [Neural Networks and Deep Learning](http://neuralnetworksanddeeplearning.com/index.html)\n", 23 | "\n" 24 | ] 25 | } 26 | ], 27 | "metadata": { 28 | "kernelspec": { 29 | "display_name": "IPython (Python 2.7)", 30 | "language": "python", 31 | "name": "python2" 32 | }, 33 | "language_info": { 34 | "codemirror_mode": { 35 | "name": "ipython", 36 | "version": 2 37 | }, 38 | "file_extension": ".py", 39 | "mimetype": "text/x-python", 40 | "name": "python", 41 | "nbconvert_exporter": "python", 42 | "pygments_lexer": "ipython2", 43 | "version": "2.7.11" 44 | } 45 | }, 46 | "nbformat": 4, 47 | "nbformat_minor": 0 48 | } 49 | -------------------------------------------------------------------------------- /notebooks/0_Prerequisite/mnist_dataset_intro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "\n", 8 | "# MNIST Dataset Introduction\n", 9 | "\n", 10 | "Most examples are using MNIST dataset of handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. 
The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flatten and converted to a 1-D numpy array of 784 features (28*28).\n", 11 | "\n", 12 | "## Overview\n", 13 | "\n", 14 | "![MNIST Digits](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n", 15 | "\n", 16 | "## Usage\n", 17 | "In our examples, we are using TensorFlow [input_data.py](https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/mnist/input_data.py) script to load that dataset.\n", 18 | "It is quite useful for managing our data, and handle:\n", 19 | "\n", 20 | "- Dataset downloading\n", 21 | "\n", 22 | "- Loading the entire dataset into numpy array: \n", 23 | "\n", 24 | "\n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": { 31 | "collapsed": true 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "# Import MNIST\n", 36 | "from tensorflow.examples.tutorials.mnist import input_data\n", 37 | "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n", 38 | "\n", 39 | "# Load data\n", 40 | "X_train = mnist.train.images\n", 41 | "Y_train = mnist.train.labels\n", 42 | "X_test = mnist.test.images\n", 43 | "Y_test = mnist.test.labels" 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "- A `next_batch` function that can iterate over the whole dataset and return only the desired fraction of the dataset samples (in order to save memory and avoid to load the entire dataset)." 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "metadata": { 57 | "collapsed": true 58 | }, 59 | "outputs": [], 60 | "source": [ 61 | "# Get the next 64 images array and labels\n", 62 | "batch_X, batch_Y = mnist.train.next_batch(64)" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "Link: http://yann.lecun.com/exdb/mnist/" 70 | ] 71 | } 72 | ], 73 | "metadata": { 74 | "kernelspec": { 75 | "display_name": "Python 2", 76 | "language": "python", 77 | "name": "python2" 78 | }, 79 | "language_info": { 80 | "codemirror_mode": { 81 | "name": "ipython", 82 | "version": 2 83 | }, 84 | "file_extension": ".py", 85 | "mimetype": "text/x-python", 86 | "name": "python", 87 | "nbconvert_exporter": "python", 88 | "pygments_lexer": "ipython2", 89 | "version": "2.7.13" 90 | } 91 | }, 92 | "nbformat": 4, 93 | "nbformat_minor": 0 94 | } 95 | -------------------------------------------------------------------------------- /notebooks/1_Introduction/helloworld.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": false 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import tensorflow as tf" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "metadata": { 18 | "collapsed": true 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "# Simple hello world using TensorFlow\n", 23 | "\n", 24 | "# Create a Constant op\n", 25 | "# The op is added as a node to the default graph.\n", 26 | "#\n", 27 | "# The value returned by the constructor represents the output\n", 28 | "# of the Constant op.\n", 29 | "\n", 30 | "hello = tf.constant('Hello, TensorFlow!')" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 3, 36 | "metadata": { 37 | "collapsed": true 38 | }, 39 | "outputs": [], 40 | "source": [ 
41 | "# Start tf session\n", 42 | "sess = tf.Session()" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 4, 48 | "metadata": { 49 | "collapsed": false 50 | }, 51 | "outputs": [ 52 | { 53 | "name": "stdout", 54 | "output_type": "stream", 55 | "text": [ 56 | "Hello, TensorFlow!\n" 57 | ] 58 | } 59 | ], 60 | "source": [ 61 | "# Run graph\n", 62 | "print(sess.run(hello))" 63 | ] 64 | } 65 | ], 66 | "metadata": { 67 | "kernelspec": { 68 | "display_name": "IPython (Python 2.7)", 69 | "language": "python", 70 | "name": "python2" 71 | }, 72 | "language_info": { 73 | "codemirror_mode": { 74 | "name": "ipython", 75 | "version": 2.0 76 | }, 77 | "file_extension": ".py", 78 | "mimetype": "text/x-python", 79 | "name": "python", 80 | "nbconvert_exporter": "python", 81 | "pygments_lexer": "ipython2", 82 | "version": "2.7.8" 83 | } 84 | }, 85 | "nbformat": 4, 86 | "nbformat_minor": 0 87 | } 88 | -------------------------------------------------------------------------------- /notebooks/6_MultiGPU/multigpu_basics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true 7 | }, 8 | "source": [ 9 | "# Multi-GPU Basics\n", 10 | "\n", 11 | "Basic Multi-GPU computation example using TensorFlow library.\n", 12 | "\n", 13 | "This tutorial requires your machine to have 2 GPUs\n", 14 | "\"/cpu:0\": The CPU of your machine.\n", 15 | "\"/gpu:0\": The first GPU of your machine\n", 16 | "\"/gpu:1\": The second GPU of your machine\n", 17 | "For this example, we are using 2 GTX-980\n", 18 | "\n", 19 | "- Author: Aymeric Damien\n", 20 | "- Project: https://github.com/aymericdamien/TensorFlow-Examples/" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 2, 26 | "metadata": { 27 | "collapsed": true 28 | }, 29 | "outputs": [], 30 | "source": [ 31 | "import numpy as np\n", 32 | "import tensorflow as tf\n", 33 | "import datetime" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 3, 39 | "metadata": { 40 | "collapsed": true 41 | }, 42 | "outputs": [], 43 | "source": [ 44 | "#Processing Units logs\n", 45 | "log_device_placement = True\n", 46 | "\n", 47 | "#num of multiplications to perform\n", 48 | "n = 10" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": { 55 | "collapsed": false 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "# Example: compute A^n + B^n on 2 GPUs\n", 60 | "\n", 61 | "# Create random large matrix\n", 62 | "A = np.random.rand(1e4, 1e4).astype('float32')\n", 63 | "B = np.random.rand(1e4, 1e4).astype('float32')\n", 64 | "\n", 65 | "# Creates a graph to store results\n", 66 | "c1 = []\n", 67 | "c2 = []\n", 68 | "\n", 69 | "# Define matrix power\n", 70 | "def matpow(M, n):\n", 71 | " if n < 1: #Abstract cases where n < 1\n", 72 | " return M\n", 73 | " else:\n", 74 | " return tf.matmul(M, matpow(M, n-1))" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 6, 80 | "metadata": { 81 | "collapsed": true 82 | }, 83 | "outputs": [], 84 | "source": [ 85 | "# Single GPU computing\n", 86 | "\n", 87 | "with tf.device('/gpu:0'):\n", 88 | " a = tf.constant(A)\n", 89 | " b = tf.constant(B)\n", 90 | " #compute A^n and B^n and store results in c1\n", 91 | " c1.append(matpow(a, n))\n", 92 | " c1.append(matpow(b, n))\n", 93 | "\n", 94 | "with tf.device('/cpu:0'):\n", 95 | " sum = tf.add_n(c1) #Addition of all elements in c1, i.e. 
A^n + B^n\n", 96 | "\n", 97 | "t1_1 = datetime.datetime.now()\n", 98 | "with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:\n", 99 | " # Runs the op.\n", 100 | " sess.run(sum)\n", 101 | "t2_1 = datetime.datetime.now()" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 7, 107 | "metadata": { 108 | "collapsed": true 109 | }, 110 | "outputs": [], 111 | "source": [ 112 | "# Multi GPU computing\n", 113 | "# GPU:0 computes A^n\n", 114 | "with tf.device('/gpu:0'):\n", 115 | " #compute A^n and store result in c2\n", 116 | " a = tf.constant(A)\n", 117 | " c2.append(matpow(a, n))\n", 118 | "\n", 119 | "#GPU:1 computes B^n\n", 120 | "with tf.device('/gpu:1'):\n", 121 | " #compute B^n and store result in c2\n", 122 | " b = tf.constant(B)\n", 123 | " c2.append(matpow(b, n))\n", 124 | "\n", 125 | "with tf.device('/cpu:0'):\n", 126 | " sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n\n", 127 | "\n", 128 | "t1_2 = datetime.datetime.now()\n", 129 | "with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:\n", 130 | " # Runs the op.\n", 131 | " sess.run(sum)\n", 132 | "t2_2 = datetime.datetime.now()" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 8, 138 | "metadata": { 139 | "collapsed": false 140 | }, 141 | "outputs": [ 142 | { 143 | "name": "stdout", 144 | "output_type": "stream", 145 | "text": [ 146 | "Single GPU computation time: 0:00:11.833497\n", 147 | "Multi GPU computation time: 0:00:07.085913\n" 148 | ] 149 | } 150 | ], 151 | "source": [ 152 | "print \"Single GPU computation time: \" + str(t2_1-t1_1)\n", 153 | "print \"Multi GPU computation time: \" + str(t2_2-t1_2)" 154 | ] 155 | } 156 | ], 157 | "metadata": { 158 | "anaconda-cloud": {}, 159 | "kernelspec": { 160 | "display_name": "Python [default]", 161 | "language": "python", 162 | "name": "python2" 163 | }, 164 | "language_info": { 165 | "codemirror_mode": { 166 | "name": "ipython", 167 | "version": 2 168 | }, 169 | "file_extension": ".py", 170 | "mimetype": "text/x-python", 171 | "name": "python", 172 | "nbconvert_exporter": "python", 173 | "pygments_lexer": "ipython2", 174 | "version": "2.7.12" 175 | } 176 | }, 177 | "nbformat": 4, 178 | "nbformat_minor": 0 179 | } 180 | -------------------------------------------------------------------------------- /notebooks/README.md: -------------------------------------------------------------------------------- 1 | ## Deprecated - Please Read 2 | 3 | Due to TensorFlow radically changing their API in v2, the examples index have been split between [v1](../tensorflow_v1) and [v2](../tensorflow_v2). 4 | 5 | The following examples are the original TF v1 examples, and will be deprecated entirely in favor of [tensorflow_v1](../tensorflow_v1) directory in a future release. 
6 | -------------------------------------------------------------------------------- /resources/img/tensorboard_advanced_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tensorboard_advanced_1.png -------------------------------------------------------------------------------- /resources/img/tensorboard_advanced_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tensorboard_advanced_2.png -------------------------------------------------------------------------------- /resources/img/tensorboard_advanced_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tensorboard_advanced_3.png -------------------------------------------------------------------------------- /resources/img/tensorboard_advanced_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tensorboard_advanced_4.png -------------------------------------------------------------------------------- /resources/img/tensorboard_basic_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tensorboard_basic_1.png -------------------------------------------------------------------------------- /resources/img/tensorboard_basic_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tensorboard_basic_2.png -------------------------------------------------------------------------------- /resources/img/tf2/tensorboard1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tf2/tensorboard1.png -------------------------------------------------------------------------------- /resources/img/tf2/tensorboard2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tf2/tensorboard2.png -------------------------------------------------------------------------------- /resources/img/tf2/tensorboard3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tf2/tensorboard3.png -------------------------------------------------------------------------------- /resources/img/tf2/tensorboard4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymericdamien/TensorFlow-Examples/6dcbe14649163814e72a22a999f20c5e247ce988/resources/img/tf2/tensorboard4.png 
-------------------------------------------------------------------------------- /tensorflow_v1/examples/1_Introduction/basic_eager_api.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Basic introduction to TensorFlow's Eager API. 3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | 7 | What is Eager API? 8 | " Eager execution is an imperative, define-by-run interface where operations are 9 | executed immediately as they are called from Python. This makes it easier to 10 | get started with TensorFlow, and can make research and development more 11 | intuitive. A vast majority of the TensorFlow API remains the same whether eager 12 | execution is enabled or not. As a result, the exact same code that constructs 13 | TensorFlow graphs (e.g. using the layers API) can be executed imperatively 14 | by using eager execution. Conversely, most models written with Eager enabled 15 | can be converted to a graph that can be further optimized and/or extracted 16 | for deployment in production without changing code. " - Rajat Monga 17 | 18 | ''' 19 | from __future__ import absolute_import, division, print_function 20 | 21 | import numpy as np 22 | import tensorflow as tf 23 | import tensorflow.contrib.eager as tfe 24 | 25 | # Set Eager API 26 | print("Setting Eager mode...") 27 | tfe.enable_eager_execution() 28 | 29 | # Define constant tensors 30 | print("Define constant tensors") 31 | a = tf.constant(2) 32 | print("a = %i" % a) 33 | b = tf.constant(3) 34 | print("b = %i" % b) 35 | 36 | # Run the operation without the need for tf.Session 37 | print("Running operations, without tf.Session") 38 | c = a + b 39 | print("a + b = %i" % c) 40 | d = a * b 41 | print("a * b = %i" % d) 42 | 43 | 44 | # Full compatibility with Numpy 45 | print("Mixing operations with Tensors and Numpy Arrays") 46 | 47 | # Define constant tensors 48 | a = tf.constant([[2., 1.], 49 | [1., 0.]], dtype=tf.float32) 50 | print("Tensor:\n a = %s" % a) 51 | b = np.array([[3., 0.], 52 | [5., 1.]], dtype=np.float32) 53 | print("NumpyArray:\n b = %s" % b) 54 | 55 | # Run the operation without the need for tf.Session 56 | print("Running operations, without tf.Session") 57 | 58 | c = a + b 59 | print("a + b = %s" % c) 60 | 61 | d = tf.matmul(a, b) 62 | print("a * b = %s" % d) 63 | 64 | print("Iterate through Tensor 'a':") 65 | for i in range(a.shape[0]): 66 | for j in range(a.shape[1]): 67 | print(a[i][j]) 68 | 69 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/1_Introduction/basic_operations.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Basic Operations example using TensorFlow library. 3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | ''' 7 | 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | 12 | # Basic constant operations 13 | # The value returned by the constructor represents the output 14 | # of the Constant op. 15 | a = tf.constant(2) 16 | b = tf.constant(3) 17 | 18 | # Launch the default graph. 19 | with tf.Session() as sess: 20 | print("a=2, b=3") 21 | print("Addition with constants: %i" % sess.run(a+b)) 22 | print("Multiplication with constants: %i" % sess.run(a*b)) 23 | 24 | # Basic Operations with variable as graph input 25 | # The value returned by the constructor represents the output 26 | # of the Variable op. 
(define as input when running session) 27 | # tf Graph input 28 | a = tf.placeholder(tf.int16) 29 | b = tf.placeholder(tf.int16) 30 | 31 | # Define some operations 32 | add = tf.add(a, b) 33 | mul = tf.multiply(a, b) 34 | 35 | # Launch the default graph. 36 | with tf.Session() as sess: 37 | # Run every operation with variable input 38 | print("Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3})) 39 | print("Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3})) 40 | 41 | 42 | # ---------------- 43 | # More details: 44 | # Matrix Multiplication from TensorFlow official tutorial 45 | 46 | # Create a Constant op that produces a 1x2 matrix. The op is 47 | # added as a node to the default graph. 48 | # 49 | # The value returned by the constructor represents the output 50 | # of the Constant op. 51 | matrix1 = tf.constant([[3., 3.]]) 52 | 53 | # Create another Constant that produces a 2x1 matrix. 54 | matrix2 = tf.constant([[2.],[2.]]) 55 | 56 | # Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs. 57 | # The returned value, 'product', represents the result of the matrix 58 | # multiplication. 59 | product = tf.matmul(matrix1, matrix2) 60 | 61 | # To run the matmul op we call the session 'run()' method, passing 'product' 62 | # which represents the output of the matmul op. This indicates to the call 63 | # that we want to get the output of the matmul op back. 64 | # 65 | # All inputs needed by the op are run automatically by the session. They 66 | # typically are run in parallel. 67 | # 68 | # The call 'run(product)' thus causes the execution of three ops in the 69 | # graph: the two constants and matmul. 70 | # 71 | # The output of the op is returned in 'result' as a numpy `ndarray` object. 72 | with tf.Session() as sess: 73 | result = sess.run(product) 74 | print(result) 75 | # ==> [[ 12.]] 76 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/1_Introduction/helloworld.py: -------------------------------------------------------------------------------- 1 | ''' 2 | HelloWorld example using TensorFlow library. 3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | ''' 7 | 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | 12 | # Simple hello world using TensorFlow 13 | 14 | # Create a Constant op 15 | # The op is added as a node to the default graph. 16 | # 17 | # The value returned by the constructor represents the output 18 | # of the Constant op. 19 | hello = tf.constant('Hello, TensorFlow!') 20 | 21 | # Start tf session 22 | sess = tf.Session() 23 | 24 | # Run the op 25 | print(sess.run(hello)) 26 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/2_BasicModels/gradient_boosted_decision_tree.py: -------------------------------------------------------------------------------- 1 | """ Gradient Boosted Decision Tree (GBDT). 2 | 3 | Implement a Gradient Boosted Decision tree with TensorFlow to classify 4 | handwritten digit images. This example is using the MNIST database of 5 | handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Links: 8 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | from __future__ import print_function 15 | 16 | import tensorflow as tf 17 | from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier 18 | from tensorflow.contrib.boosted_trees.proto import learner_pb2 as gbdt_learner 19 | 20 | # Ignore all GPUs (current TF GBDT does not support GPU). 21 | import os 22 | os.environ["CUDA_VISIBLE_DEVICES"] = "" 23 | 24 | # Import MNIST data 25 | # Set verbosity to display errors only (Remove this line for showing warnings) 26 | tf.logging.set_verbosity(tf.logging.ERROR) 27 | from tensorflow.examples.tutorials.mnist import input_data 28 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False, 29 | source_url='http://yann.lecun.com/exdb/mnist/') 30 | 31 | # Parameters 32 | batch_size = 4096 # The number of samples per batch 33 | num_classes = 10 # The 10 digits 34 | num_features = 784 # Each image is 28x28 pixels 35 | max_steps = 10000 36 | 37 | # GBDT Parameters 38 | learning_rate = 0.1 39 | l1_regul = 0. 40 | l2_regul = 1. 41 | examples_per_layer = 1000 42 | num_trees = 10 43 | max_depth = 16 44 | 45 | # Fill GBDT parameters into the config proto 46 | learner_config = gbdt_learner.LearnerConfig() 47 | learner_config.learning_rate_tuner.fixed.learning_rate = learning_rate 48 | learner_config.regularization.l1 = l1_regul 49 | learner_config.regularization.l2 = l2_regul / examples_per_layer 50 | learner_config.constraints.max_tree_depth = max_depth 51 | growing_mode = gbdt_learner.LearnerConfig.LAYER_BY_LAYER 52 | learner_config.growing_mode = growing_mode 53 | run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300) 54 | learner_config.multi_class_strategy = ( 55 | gbdt_learner.LearnerConfig.DIAGONAL_HESSIAN) 56 | 57 | # Create a TensorFlow GBDT Estimator 58 | gbdt_model = GradientBoostedDecisionTreeClassifier( 59 | model_dir=None, # No save directory specified 60 | learner_config=learner_config, 61 | n_classes=num_classes, 62 | examples_per_layer=examples_per_layer, 63 | num_trees=num_trees, 64 | center_bias=False, 65 | config=run_config) 66 | 67 | # Display TF info logs 68 | tf.logging.set_verbosity(tf.logging.INFO) 69 | 70 | # Define the input function for training 71 | input_fn = tf.estimator.inputs.numpy_input_fn( 72 | x={'images': mnist.train.images}, y=mnist.train.labels, 73 | batch_size=batch_size, num_epochs=None, shuffle=True) 74 | # Train the Model 75 | gbdt_model.fit(input_fn=input_fn, max_steps=max_steps) 76 | 77 | # Evaluate the Model 78 | # Define the input function for evaluating 79 | input_fn = tf.estimator.inputs.numpy_input_fn( 80 | x={'images': mnist.test.images}, y=mnist.test.labels, 81 | batch_size=batch_size, shuffle=False) 82 | # Use the Estimator 'evaluate' method 83 | e = gbdt_model.evaluate(input_fn=input_fn) 84 | 85 | print("Testing Accuracy:", e['accuracy']) 86 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/2_BasicModels/kmeans.py: -------------------------------------------------------------------------------- 1 | """ K-Means. 2 | 3 | Implement K-Means algorithm with TensorFlow, and apply it to classify 4 | handwritten digit images. This example is using the MNIST database of 5 | handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Note: This example requires TensorFlow v1.1.0 or over.
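(The version note matters because KMeans.training_graph() from tf.contrib.factorization returns six values on older releases and seven on TensorFlow 1.4+; the length check on `training_graph` further down handles both cases.)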
8 | 9 | Author: Aymeric Damien 10 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 11 | """ 12 | 13 | from __future__ import print_function 14 | 15 | import numpy as np 16 | import tensorflow as tf 17 | from tensorflow.contrib.factorization import KMeans 18 | 19 | # Ignore all GPUs, tf k-means does not benefit from it. 20 | import os 21 | os.environ["CUDA_VISIBLE_DEVICES"] = "" 22 | 23 | # Import MNIST data 24 | from tensorflow.examples.tutorials.mnist import input_data 25 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 26 | full_data_x = mnist.train.images 27 | 28 | # Parameters 29 | num_steps = 50 # Total steps to train 30 | batch_size = 1024 # The number of samples per batch 31 | k = 25 # The number of clusters 32 | num_classes = 10 # The 10 digits 33 | num_features = 784 # Each image is 28x28 pixels 34 | 35 | # Input images 36 | X = tf.placeholder(tf.float32, shape=[None, num_features]) 37 | # Labels (for assigning a label to a centroid and testing) 38 | Y = tf.placeholder(tf.float32, shape=[None, num_classes]) 39 | 40 | # K-Means Parameters 41 | kmeans = KMeans(inputs=X, num_clusters=k, distance_metric='cosine', 42 | use_mini_batch=True) 43 | 44 | # Build KMeans graph 45 | training_graph = kmeans.training_graph() 46 | 47 | if len(training_graph) > 6: # Tensorflow 1.4+ 48 | (all_scores, cluster_idx, scores, cluster_centers_initialized, 49 | cluster_centers_var, init_op, train_op) = training_graph 50 | else: 51 | (all_scores, cluster_idx, scores, cluster_centers_initialized, 52 | init_op, train_op) = training_graph 53 | 54 | cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple 55 | avg_distance = tf.reduce_mean(scores) 56 | 57 | # Initialize the variables (i.e. assign their default value) 58 | init_vars = tf.global_variables_initializer() 59 | 60 | # Start TensorFlow session 61 | sess = tf.Session() 62 | 63 | # Run the initializer 64 | sess.run(init_vars, feed_dict={X: full_data_x}) 65 | sess.run(init_op, feed_dict={X: full_data_x}) 66 | 67 | # Training 68 | for i in range(1, num_steps + 1): 69 | _, d, idx = sess.run([train_op, avg_distance, cluster_idx], 70 | feed_dict={X: full_data_x}) 71 | if i % 10 == 0 or i == 1: 72 | print("Step %i, Avg Distance: %f" % (i, d)) 73 | 74 | # Assign a label to each centroid 75 | # Count total number of labels per centroid, using the label of each training 76 | # sample to their closest centroid (given by 'idx') 77 | counts = np.zeros(shape=(k, num_classes)) 78 | for i in range(len(idx)): 79 | counts[idx[i]] += mnist.train.labels[i] 80 | # Assign the most frequent label to the centroid 81 | labels_map = [np.argmax(c) for c in counts] 82 | labels_map = tf.convert_to_tensor(labels_map) 83 | 84 | # Evaluation ops 85 | # Lookup: centroid_id -> label 86 | cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx) 87 | # Compute accuracy 88 | correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32)) 89 | accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 90 | 91 | # Test Model 92 | test_x, test_y = mnist.test.images, mnist.test.labels 93 | print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y})) 94 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/2_BasicModels/linear_regression.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A linear regression learning algorithm example using TensorFlow library. 
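The model fitted below is a single-feature line, pred = W*X + b, trained by plain gradient descent on the mean squared error cost = sum((pred - Y)^2) / (2*n_samples); see the `cost` and `optimizer` definitions further down.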
3 | 4 | Author: Aymeric Damien 5 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 6 | ''' 7 | 8 | from __future__ import print_function 9 | 10 | import tensorflow as tf 11 | import numpy 12 | import matplotlib.pyplot as plt 13 | rng = numpy.random 14 | 15 | # Parameters 16 | learning_rate = 0.01 17 | training_epochs = 1000 18 | display_step = 50 19 | 20 | # Training Data 21 | train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167, 22 | 7.042,10.791,5.313,7.997,5.654,9.27,3.1]) 23 | train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 24 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3]) 25 | n_samples = train_X.shape[0] 26 | 27 | # tf Graph Input 28 | X = tf.placeholder("float") 29 | Y = tf.placeholder("float") 30 | 31 | # Set model weights 32 | W = tf.Variable(rng.randn(), name="weight") 33 | b = tf.Variable(rng.randn(), name="bias") 34 | 35 | # Construct a linear model 36 | pred = tf.add(tf.multiply(X, W), b) 37 | 38 | # Mean squared error 39 | cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples) 40 | # Gradient descent 41 | # Note, minimize() knows to modify W and b because Variable objects are trainable=True by default 42 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 43 | 44 | # Initialize the variables (i.e. assign their default value) 45 | init = tf.global_variables_initializer() 46 | 47 | # Start training 48 | with tf.Session() as sess: 49 | 50 | # Run the initializer 51 | sess.run(init) 52 | 53 | # Fit all training data 54 | for epoch in range(training_epochs): 55 | for (x, y) in zip(train_X, train_Y): 56 | sess.run(optimizer, feed_dict={X: x, Y: y}) 57 | 58 | # Display logs per epoch step 59 | if (epoch+1) % display_step == 0: 60 | c = sess.run(cost, feed_dict={X: train_X, Y:train_Y}) 61 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \ 62 | "W=", sess.run(W), "b=", sess.run(b)) 63 | 64 | print("Optimization Finished!") 65 | training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y}) 66 | print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n') 67 | 68 | # Graphic display 69 | plt.plot(train_X, train_Y, 'ro', label='Original data') 70 | plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line') 71 | plt.legend() 72 | plt.show() 73 | 74 | # Testing example, as requested (Issue #2) 75 | test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1]) 76 | test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03]) 77 | 78 | print("Testing... (Mean square loss Comparison)") 79 | testing_cost = sess.run( 80 | tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]), 81 | feed_dict={X: test_X, Y: test_Y}) # same function as cost above 82 | print("Testing cost=", testing_cost) 83 | print("Absolute mean square loss difference:", abs( 84 | training_cost - testing_cost)) 85 | 86 | plt.plot(test_X, test_Y, 'bo', label='Testing data') 87 | plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line') 88 | plt.legend() 89 | plt.show() 90 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/2_BasicModels/linear_regression_eager_api.py: -------------------------------------------------------------------------------- 1 | ''' Linear Regression with Eager API. 2 | 3 | A linear regression learning algorithm example using TensorFlow's Eager API. 
4 | 5 | Author: Aymeric Damien 6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 7 | ''' 8 | from __future__ import absolute_import, division, print_function 9 | 10 | import matplotlib.pyplot as plt 11 | import numpy as np 12 | import tensorflow as tf 13 | 14 | # Set Eager API 15 | tf.enable_eager_execution() 16 | tfe = tf.contrib.eager 17 | 18 | # Training Data 19 | train_X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 20 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1] 21 | train_Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 22 | 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3] 23 | n_samples = len(train_X) 24 | 25 | # Parameters 26 | learning_rate = 0.01 27 | display_step = 100 28 | num_steps = 1000 29 | 30 | # Weight and Bias 31 | W = tfe.Variable(np.random.randn()) 32 | b = tfe.Variable(np.random.randn()) 33 | 34 | 35 | # Linear regression (Wx + b) 36 | def linear_regression(inputs): 37 | return inputs * W + b 38 | 39 | 40 | # Mean square error 41 | def mean_square_fn(model_fn, inputs, labels): 42 | return tf.reduce_sum(tf.pow(model_fn(inputs) - labels, 2)) / (2 * n_samples) 43 | 44 | 45 | # SGD Optimizer 46 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 47 | # Compute gradients 48 | grad = tfe.implicit_gradients(mean_square_fn) 49 | 50 | # Initial cost, before optimizing 51 | print("Initial cost= {:.9f}".format( 52 | mean_square_fn(linear_regression, train_X, train_Y)), 53 | "W=", W.numpy(), "b=", b.numpy()) 54 | 55 | # Training 56 | for step in range(num_steps): 57 | 58 | optimizer.apply_gradients(grad(linear_regression, train_X, train_Y)) 59 | 60 | if (step + 1) % display_step == 0 or step == 0: 61 | print("Epoch:", '%04d' % (step + 1), "cost=", 62 | "{:.9f}".format(mean_square_fn(linear_regression, train_X, train_Y)), 63 | "W=", W.numpy(), "b=", b.numpy()) 64 | 65 | # Graphic display 66 | plt.plot(train_X, train_Y, 'ro', label='Original data') 67 | plt.plot(train_X, np.array(W * train_X + b), label='Fitted line') 68 | plt.legend() 69 | plt.show() 70 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/2_BasicModels/logistic_regression.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A logistic regression learning algorithm example using TensorFlow library. 
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.01 20 | training_epochs = 25 21 | batch_size = 100 22 | display_step = 1 23 | 24 | # tf Graph Input 25 | x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784 26 | y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes 27 | 28 | # Set model weights 29 | W = tf.Variable(tf.zeros([784, 10])) 30 | b = tf.Variable(tf.zeros([10])) 31 | 32 | # Construct model 33 | pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax 34 | 35 | # Minimize error using cross entropy 36 | cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) 37 | # Gradient Descent 38 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 39 | 40 | # Initialize the variables (i.e. assign their default value) 41 | init = tf.global_variables_initializer() 42 | 43 | # Start training 44 | with tf.Session() as sess: 45 | 46 | # Run the initializer 47 | sess.run(init) 48 | 49 | # Training cycle 50 | for epoch in range(training_epochs): 51 | avg_cost = 0. 52 | total_batch = int(mnist.train.num_examples/batch_size) 53 | # Loop over all batches 54 | for i in range(total_batch): 55 | batch_xs, batch_ys = mnist.train.next_batch(batch_size) 56 | # Run optimization op (backprop) and cost op (to get loss value) 57 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, 58 | y: batch_ys}) 59 | # Compute average loss 60 | avg_cost += c / total_batch 61 | # Display logs per epoch step 62 | if (epoch+1) % display_step == 0: 63 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)) 64 | 65 | print("Optimization Finished!") 66 | 67 | # Test model 68 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 69 | # Calculate accuracy 70 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 71 | print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})) 72 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/2_BasicModels/logistic_regression_eager_api.py: -------------------------------------------------------------------------------- 1 | ''' Logistic Regression with Eager API. 2 | 3 | A logistic regression learning algorithm example using TensorFlow's Eager API. 
4 | This example is using the MNIST database of handwritten digits 5 | (http://yann.lecun.com/exdb/mnist/) 6 | 7 | Author: Aymeric Damien 8 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 9 | ''' 10 | from __future__ import absolute_import, division, print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Set Eager API 15 | tf.enable_eager_execution() 16 | tfe = tf.contrib.eager 17 | 18 | # Import MNIST data 19 | from tensorflow.examples.tutorials.mnist import input_data 20 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 21 | 22 | # Parameters 23 | learning_rate = 0.1 24 | batch_size = 128 25 | num_steps = 1000 26 | display_step = 100 27 | 28 | dataset = tf.data.Dataset.from_tensor_slices( 29 | (mnist.train.images, mnist.train.labels)) 30 | dataset = dataset.repeat().batch(batch_size).prefetch(batch_size) 31 | dataset_iter = tfe.Iterator(dataset) 32 | 33 | # Variables 34 | W = tfe.Variable(tf.zeros([784, 10]), name='weights') 35 | b = tfe.Variable(tf.zeros([10]), name='bias') 36 | 37 | 38 | # Logistic regression (Wx + b) 39 | def logistic_regression(inputs): 40 | return tf.matmul(inputs, W) + b 41 | 42 | 43 | # Cross-Entropy loss function 44 | def loss_fn(inference_fn, inputs, labels): 45 | # Using sparse_softmax cross entropy 46 | return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( 47 | logits=inference_fn(inputs), labels=labels)) 48 | 49 | 50 | # Calculate accuracy 51 | def accuracy_fn(inference_fn, inputs, labels): 52 | prediction = tf.nn.softmax(inference_fn(inputs)) 53 | correct_pred = tf.equal(tf.argmax(prediction, 1), labels) 54 | return tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 55 | 56 | 57 | # SGD Optimizer 58 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 59 | # Compute gradients 60 | grad = tfe.implicit_gradients(loss_fn) 61 | 62 | # Training 63 | average_loss = 0. 64 | average_acc = 0. 65 | for step in range(num_steps): 66 | 67 | # Iterate through the dataset 68 | d = dataset_iter.next() 69 | 70 | # Images 71 | x_batch = d[0] 72 | # Labels 73 | y_batch = tf.cast(d[1], dtype=tf.int64) 74 | 75 | # Compute the batch loss 76 | batch_loss = loss_fn(logistic_regression, x_batch, y_batch) 77 | average_loss += batch_loss 78 | # Compute the batch accuracy 79 | batch_accuracy = accuracy_fn(logistic_regression, x_batch, y_batch) 80 | average_acc += batch_accuracy 81 | 82 | if step == 0: 83 | # Display the initial cost, before optimizing 84 | print("Initial loss= {:.9f}".format(average_loss)) 85 | 86 | # Update the variables following gradients info 87 | optimizer.apply_gradients(grad(logistic_regression, x_batch, y_batch)) 88 | 89 | # Display info 90 | if (step + 1) % display_step == 0 or step == 0: 91 | if step > 0: 92 | average_loss /= display_step 93 | average_acc /= display_step 94 | print("Step:", '%04d' % (step + 1), " loss=", 95 | "{:.9f}".format(average_loss), " accuracy=", 96 | "{:.4f}".format(average_acc)) 97 | average_loss = 0. 98 | average_acc = 0. 
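# Note on the update above: `grad = tfe.implicit_gradients(loss_fn)` builds a function that
# returns (gradient, variable) pairs for every trainable variable the loss touches (here W
# and b), which is exactly the format optimizer.apply_gradients() expects. Roughly:
#   grads_and_vars = grad(logistic_regression, x_batch, y_batch)  # [(dL/dW, W), (dL/db, b)]
#   optimizer.apply_gradients(grads_and_vars)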
99 | 100 | # Evaluate model on the test image set 101 | testX = mnist.test.images 102 | testY = mnist.test.labels 103 | 104 | test_acc = accuracy_fn(logistic_regression, testX, testY) 105 | print("Testset Accuracy: {:.4f}".format(test_acc)) 106 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/2_BasicModels/nearest_neighbor.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A nearest neighbor learning algorithm example using TensorFlow library. 3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import numpy as np 13 | import tensorflow as tf 14 | 15 | # Import MNIST data 16 | from tensorflow.examples.tutorials.mnist import input_data 17 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 18 | 19 | # In this example, we limit mnist data 20 | Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training (nn candidates) 21 | Xte, Yte = mnist.test.next_batch(200) #200 for testing 22 | 23 | # tf Graph Input 24 | xtr = tf.placeholder("float", [None, 784]) 25 | xte = tf.placeholder("float", [784]) 26 | 27 | # Nearest Neighbor calculation using L1 Distance 28 | # Calculate L1 Distance 29 | distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1) 30 | # Prediction: Get min distance index (Nearest neighbor) 31 | pred = tf.arg_min(distance, 0) 32 | 33 | accuracy = 0. 34 | 35 | # Initialize the variables (i.e. assign their default value) 36 | init = tf.global_variables_initializer() 37 | 38 | # Start training 39 | with tf.Session() as sess: 40 | 41 | # Run the initializer 42 | sess.run(init) 43 | 44 | # loop over test data 45 | for i in range(len(Xte)): 46 | # Get nearest neighbor 47 | nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]}) 48 | # Get nearest neighbor class label and compare it to its true label 49 | print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \ 50 | "True Class:", np.argmax(Yte[i])) 51 | # Calculate accuracy 52 | if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]): 53 | accuracy += 1./len(Xte) 54 | print("Done!") 55 | print("Accuracy:", accuracy) 56 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/2_BasicModels/random_forest.py: -------------------------------------------------------------------------------- 1 | """ Random Forest. 2 | 3 | Implement Random Forest algorithm with TensorFlow, and apply it to classify 4 | handwritten digit images. This example is using the MNIST database of 5 | handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Author: Aymeric Damien 8 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 9 | """ 10 | 11 | from __future__ import print_function 12 | 13 | import tensorflow as tf 14 | from tensorflow.contrib.tensor_forest.python import tensor_forest 15 | from tensorflow.python.ops import resources 16 | 17 | # Ignore all GPUs, tf random forest does not benefit from it. 
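# (Setting CUDA_VISIBLE_DEVICES to an empty string below hides every GPU from the process,
# so TensorFlow silently falls back to its CPU kernels.)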
18 | import os 19 | os.environ["CUDA_VISIBLE_DEVICES"] = "" 20 | 21 | # Import MNIST data 22 | from tensorflow.examples.tutorials.mnist import input_data 23 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 24 | 25 | # Parameters 26 | num_steps = 500 # Total steps to train 27 | batch_size = 1024 # The number of samples per batch 28 | num_classes = 10 # The 10 digits 29 | num_features = 784 # Each image is 28x28 pixels 30 | num_trees = 10 31 | max_nodes = 1000 32 | 33 | # Input and Target data 34 | X = tf.placeholder(tf.float32, shape=[None, num_features]) 35 | # For random forest, labels must be integers (the class id) 36 | Y = tf.placeholder(tf.int32, shape=[None]) 37 | 38 | # Random Forest Parameters 39 | hparams = tensor_forest.ForestHParams(num_classes=num_classes, 40 | num_features=num_features, 41 | num_trees=num_trees, 42 | max_nodes=max_nodes).fill() 43 | 44 | # Build the Random Forest 45 | forest_graph = tensor_forest.RandomForestGraphs(hparams) 46 | # Get training graph and loss 47 | train_op = forest_graph.training_graph(X, Y) 48 | loss_op = forest_graph.training_loss(X, Y) 49 | 50 | # Measure the accuracy 51 | infer_op, _, _ = forest_graph.inference_graph(X) 52 | correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64)) 53 | accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 54 | 55 | # Initialize the variables (i.e. assign their default value) and forest resources 56 | init_vars = tf.group(tf.global_variables_initializer(), 57 | resources.initialize_resources(resources.shared_resources())) 58 | 59 | # Start TensorFlow session 60 | sess = tf.Session() 61 | 62 | # Run the initializer 63 | sess.run(init_vars) 64 | 65 | # Training 66 | for i in range(1, num_steps + 1): 67 | # Prepare Data 68 | # Get the next batch of MNIST data (only images are needed, not labels) 69 | batch_x, batch_y = mnist.train.next_batch(batch_size) 70 | _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y}) 71 | if i % 50 == 0 or i == 1: 72 | acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y}) 73 | print('Step %i, Loss: %f, Acc: %f' % (i, l, acc)) 74 | 75 | # Test Model 76 | test_x, test_y = mnist.test.images, mnist.test.labels 77 | print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y})) 78 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/3_NeuralNetworks/autoencoder.py: -------------------------------------------------------------------------------- 1 | """ Auto Encoder Example. 2 | 3 | Build a 2 layers auto-encoder with TensorFlow to compress images to a 4 | lower latent space and then reconstruct them. 5 | 6 | References: 7 | Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based 8 | learning applied to document recognition." Proceedings of the IEEE, 9 | 86(11):2278-2324, November 1998. 
10 | 11 | Links: 12 | [MNIST Dataset] http://yann.lecun.com/exdb/mnist/ 13 | 14 | Author: Aymeric Damien 15 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 16 | """ 17 | from __future__ import division, print_function, absolute_import 18 | 19 | import tensorflow as tf 20 | import numpy as np 21 | import matplotlib.pyplot as plt 22 | 23 | # Import MNIST data 24 | from tensorflow.examples.tutorials.mnist import input_data 25 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 26 | 27 | # Training Parameters 28 | learning_rate = 0.01 29 | num_steps = 30000 30 | batch_size = 256 31 | 32 | display_step = 1000 33 | examples_to_show = 10 34 | 35 | # Network Parameters 36 | num_hidden_1 = 256 # 1st layer num features 37 | num_hidden_2 = 128 # 2nd layer num features (the latent dim) 38 | num_input = 784 # MNIST data input (img shape: 28*28) 39 | 40 | # tf Graph input (only pictures) 41 | X = tf.placeholder("float", [None, num_input]) 42 | 43 | weights = { 44 | 'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1])), 45 | 'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2])), 46 | 'decoder_h1': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1])), 47 | 'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input])), 48 | } 49 | biases = { 50 | 'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1])), 51 | 'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2])), 52 | 'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1])), 53 | 'decoder_b2': tf.Variable(tf.random_normal([num_input])), 54 | } 55 | 56 | # Building the encoder 57 | def encoder(x): 58 | # Encoder Hidden layer with sigmoid activation #1 59 | layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), 60 | biases['encoder_b1'])) 61 | # Encoder Hidden layer with sigmoid activation #2 62 | layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), 63 | biases['encoder_b2'])) 64 | return layer_2 65 | 66 | 67 | # Building the decoder 68 | def decoder(x): 69 | # Decoder Hidden layer with sigmoid activation #1 70 | layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), 71 | biases['decoder_b1'])) 72 | # Decoder Hidden layer with sigmoid activation #2 73 | layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), 74 | biases['decoder_b2'])) 75 | return layer_2 76 | 77 | # Construct model 78 | encoder_op = encoder(X) 79 | decoder_op = decoder(encoder_op) 80 | 81 | # Prediction 82 | y_pred = decoder_op 83 | # Targets (Labels) are the input data. 84 | y_true = X 85 | 86 | # Define loss and optimizer, minimize the squared error 87 | loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2)) 88 | optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss) 89 | 90 | # Initialize the variables (i.e. 
assign their default value) 91 | init = tf.global_variables_initializer() 92 | 93 | # Start Training 94 | # Start a new TF session 95 | with tf.Session() as sess: 96 | 97 | # Run the initializer 98 | sess.run(init) 99 | 100 | # Training 101 | for i in range(1, num_steps+1): 102 | # Prepare Data 103 | # Get the next batch of MNIST data (only images are needed, not labels) 104 | batch_x, _ = mnist.train.next_batch(batch_size) 105 | 106 | # Run optimization op (backprop) and cost op (to get loss value) 107 | _, l = sess.run([optimizer, loss], feed_dict={X: batch_x}) 108 | # Display logs per step 109 | if i % display_step == 0 or i == 1: 110 | print('Step %i: Minibatch Loss: %f' % (i, l)) 111 | 112 | # Testing 113 | # Encode and decode images from test set and visualize their reconstruction. 114 | n = 4 115 | canvas_orig = np.empty((28 * n, 28 * n)) 116 | canvas_recon = np.empty((28 * n, 28 * n)) 117 | for i in range(n): 118 | # MNIST test set 119 | batch_x, _ = mnist.test.next_batch(n) 120 | # Encode and decode the digit image 121 | g = sess.run(decoder_op, feed_dict={X: batch_x}) 122 | 123 | # Display original images 124 | for j in range(n): 125 | # Draw the original digits 126 | canvas_orig[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = \ 127 | batch_x[j].reshape([28, 28]) 128 | # Display reconstructed images 129 | for j in range(n): 130 | # Draw the reconstructed digits 131 | canvas_recon[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = \ 132 | g[j].reshape([28, 28]) 133 | 134 | print("Original Images") 135 | plt.figure(figsize=(n, n)) 136 | plt.imshow(canvas_orig, origin="upper", cmap="gray") 137 | plt.show() 138 | 139 | print("Reconstructed Images") 140 | plt.figure(figsize=(n, n)) 141 | plt.imshow(canvas_recon, origin="upper", cmap="gray") 142 | plt.show() 143 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/3_NeuralNetworks/bidirectional_rnn.py: -------------------------------------------------------------------------------- 1 | """ Bi-directional Recurrent Neural Network. 2 | 3 | A Bi-directional Recurrent Neural Network (LSTM) implementation example using 4 | TensorFlow library. This example is using the MNIST database of handwritten 5 | digits (http://yann.lecun.com/exdb/mnist/) 6 | 7 | Links: 8 | [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf) 9 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 10 | 11 | Author: Aymeric Damien 12 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 13 | """ 14 | 15 | from __future__ import print_function 16 | 17 | import tensorflow as tf 18 | from tensorflow.contrib import rnn 19 | import numpy as np 20 | 21 | # Import MNIST data 22 | from tensorflow.examples.tutorials.mnist import input_data 23 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 24 | 25 | ''' 26 | To classify images using a bidirectional recurrent neural network, we consider 27 | every image row as a sequence of pixels. Because MNIST image shape is 28*28px, 28 | we will then handle 28 sequences of 28 steps for every sample. 
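Concretely, each flattened 784-pixel image is viewed as a 28x28 array (see `batch_x.reshape((batch_size, timesteps, num_input))` below), so every image row is fed to the network as one timestep.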
29 | ''' 30 | 31 | # Training Parameters 32 | learning_rate = 0.001 33 | training_steps = 10000 34 | batch_size = 128 35 | display_step = 200 36 | 37 | # Network Parameters 38 | num_input = 28 # MNIST data input (img shape: 28*28) 39 | timesteps = 28 # timesteps 40 | num_hidden = 128 # hidden layer num of features 41 | num_classes = 10 # MNIST total classes (0-9 digits) 42 | 43 | # tf Graph input 44 | X = tf.placeholder("float", [None, timesteps, num_input]) 45 | Y = tf.placeholder("float", [None, num_classes]) 46 | 47 | # Define weights 48 | weights = { 49 | # Hidden layer weights => 2*n_hidden because of forward + backward cells 50 | 'out': tf.Variable(tf.random_normal([2*num_hidden, num_classes])) 51 | } 52 | biases = { 53 | 'out': tf.Variable(tf.random_normal([num_classes])) 54 | } 55 | 56 | 57 | def BiRNN(x, weights, biases): 58 | 59 | # Prepare data shape to match `rnn` function requirements 60 | # Current data input shape: (batch_size, timesteps, n_input) 61 | # Required shape: 'timesteps' tensors list of shape (batch_size, num_input) 62 | 63 | # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input) 64 | x = tf.unstack(x, timesteps, 1) 65 | 66 | # Define lstm cells with tensorflow 67 | # Forward direction cell 68 | lstm_fw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0) 69 | # Backward direction cell 70 | lstm_bw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0) 71 | 72 | # Get lstm cell output 73 | try: 74 | outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, 75 | dtype=tf.float32) 76 | except Exception: # Old TensorFlow version only returns outputs not states 77 | outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, 78 | dtype=tf.float32) 79 | 80 | # Linear activation, using rnn inner loop last output 81 | return tf.matmul(outputs[-1], weights['out']) + biases['out'] 82 | 83 | logits = BiRNN(X, weights, biases) 84 | prediction = tf.nn.softmax(logits) 85 | 86 | # Define loss and optimizer 87 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 88 | logits=logits, labels=Y)) 89 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 90 | train_op = optimizer.minimize(loss_op) 91 | 92 | # Evaluate model (with test logits, for dropout to be disabled) 93 | correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) 94 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 95 | 96 | # Initialize the variables (i.e. 
assign their default value) 97 | init = tf.global_variables_initializer() 98 | 99 | # Start training 100 | with tf.Session() as sess: 101 | 102 | # Run the initializer 103 | sess.run(init) 104 | 105 | for step in range(1, training_steps+1): 106 | batch_x, batch_y = mnist.train.next_batch(batch_size) 107 | # Reshape data to get 28 seq of 28 elements 108 | batch_x = batch_x.reshape((batch_size, timesteps, num_input)) 109 | # Run optimization op (backprop) 110 | sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) 111 | if step % display_step == 0 or step == 1: 112 | # Calculate batch loss and accuracy 113 | loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, 114 | Y: batch_y}) 115 | print("Step " + str(step) + ", Minibatch Loss= " + \ 116 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 117 | "{:.3f}".format(acc)) 118 | 119 | print("Optimization Finished!") 120 | 121 | # Calculate accuracy for 128 mnist test images 122 | test_len = 128 123 | test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input)) 124 | test_label = mnist.test.labels[:test_len] 125 | print("Testing Accuracy:", \ 126 | sess.run(accuracy, feed_dict={X: test_data, Y: test_label})) 127 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/3_NeuralNetworks/convolutional_network.py: -------------------------------------------------------------------------------- 1 | """ Convolutional Neural Network. 2 | 3 | Build and train a convolutional neural network with TensorFlow. 4 | This example is using the MNIST database of handwritten digits 5 | (http://yann.lecun.com/exdb/mnist/) 6 | 7 | This example is using TensorFlow layers API, see 'convolutional_network_raw' 8 | example for a raw implementation with variables. 
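The model_fn defined below stacks: 5x5 conv (32 filters) -> 2x2 max-pool -> 3x3 conv (64 filters) -> 2x2 max-pool -> dense(1024) -> dropout(rate 0.25) -> dense(10 logits), and wraps it in a tf.estimator.Estimator for training and evaluation.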
9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | from __future__ import division, print_function, absolute_import 14 | 15 | # Import MNIST data 16 | from tensorflow.examples.tutorials.mnist import input_data 17 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 18 | 19 | import tensorflow as tf 20 | 21 | # Training Parameters 22 | learning_rate = 0.001 23 | num_steps = 2000 24 | batch_size = 128 25 | 26 | # Network Parameters 27 | num_input = 784 # MNIST data input (img shape: 28*28) 28 | num_classes = 10 # MNIST total classes (0-9 digits) 29 | dropout = 0.25 # Dropout, probability to drop a unit 30 | 31 | 32 | # Create the neural network 33 | def conv_net(x_dict, n_classes, dropout, reuse, is_training): 34 | # Define a scope for reusing the variables 35 | with tf.variable_scope('ConvNet', reuse=reuse): 36 | # TF Estimator input is a dict, in case of multiple inputs 37 | x = x_dict['images'] 38 | 39 | # MNIST data input is a 1-D vector of 784 features (28*28 pixels) 40 | # Reshape to match picture format [Height x Width x Channel] 41 | # Tensor input becomes 4-D: [Batch Size, Height, Width, Channel] 42 | x = tf.reshape(x, shape=[-1, 28, 28, 1]) 43 | 44 | # Convolution Layer with 32 filters and a kernel size of 5 45 | conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu) 46 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 47 | conv1 = tf.layers.max_pooling2d(conv1, 2, 2) 48 | 49 | # Convolution Layer with 64 filters and a kernel size of 3 50 | conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu) 51 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 52 | conv2 = tf.layers.max_pooling2d(conv2, 2, 2) 53 | 54 | # Flatten the data to a 1-D vector for the fully connected layer 55 | fc1 = tf.contrib.layers.flatten(conv2) 56 | 57 | # Fully connected layer (in tf contrib folder for now) 58 | fc1 = tf.layers.dense(fc1, 1024) 59 | # Apply Dropout (if is_training is False, dropout is not applied) 60 | fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training) 61 | 62 | # Output layer, class prediction 63 | out = tf.layers.dense(fc1, n_classes) 64 | 65 | return out 66 | 67 | 68 | # Define the model function (following TF Estimator Template) 69 | def model_fn(features, labels, mode): 70 | # Build the neural network 71 | # Because Dropout has different behavior at training and prediction time, we 72 | # need to create 2 distinct computation graphs that still share the same weights.
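# (The first call below, with reuse=False, creates the weights inside the 'ConvNet' variable
# scope; the second call, with reuse=True, looks the same variables up again, so the training
# and inference graphs share weights and differ only in their dropout behavior.)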
73 | logits_train = conv_net(features, num_classes, dropout, reuse=False, 74 | is_training=True) 75 | logits_test = conv_net(features, num_classes, dropout, reuse=True, 76 | is_training=False) 77 | 78 | # Predictions 79 | pred_classes = tf.argmax(logits_test, axis=1) 80 | pred_probas = tf.nn.softmax(logits_test) 81 | 82 | # If prediction mode, early return 83 | if mode == tf.estimator.ModeKeys.PREDICT: 84 | return tf.estimator.EstimatorSpec(mode, predictions=pred_classes) 85 | 86 | # Define loss and optimizer 87 | loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( 88 | logits=logits_train, labels=tf.cast(labels, dtype=tf.int32))) 89 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 90 | train_op = optimizer.minimize(loss_op, 91 | global_step=tf.train.get_global_step()) 92 | 93 | # Evaluate the accuracy of the model 94 | acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 95 | 96 | # TF Estimators requires to return a EstimatorSpec, that specify 97 | # the different ops for training, evaluating, ... 98 | estim_specs = tf.estimator.EstimatorSpec( 99 | mode=mode, 100 | predictions=pred_classes, 101 | loss=loss_op, 102 | train_op=train_op, 103 | eval_metric_ops={'accuracy': acc_op}) 104 | 105 | return estim_specs 106 | 107 | # Build the Estimator 108 | model = tf.estimator.Estimator(model_fn) 109 | 110 | # Define the input function for training 111 | input_fn = tf.estimator.inputs.numpy_input_fn( 112 | x={'images': mnist.train.images}, y=mnist.train.labels, 113 | batch_size=batch_size, num_epochs=None, shuffle=True) 114 | # Train the Model 115 | model.train(input_fn, steps=num_steps) 116 | 117 | # Evaluate the Model 118 | # Define the input function for evaluating 119 | input_fn = tf.estimator.inputs.numpy_input_fn( 120 | x={'images': mnist.test.images}, y=mnist.test.labels, 121 | batch_size=batch_size, shuffle=False) 122 | # Use the Estimator 'evaluate' method 123 | e = model.evaluate(input_fn) 124 | 125 | print("Testing Accuracy:", e['accuracy']) 126 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/3_NeuralNetworks/convolutional_network_raw.py: -------------------------------------------------------------------------------- 1 | """ Convolutional Neural Network. 2 | 3 | Build and train a convolutional neural network with TensorFlow. 
4 | This example is using the MNIST database of handwritten digits 5 | (http://yann.lecun.com/exdb/mnist/) 6 | 7 | Author: Aymeric Damien 8 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 9 | """ 10 | 11 | from __future__ import division, print_function, absolute_import 12 | 13 | import tensorflow as tf 14 | 15 | # Import MNIST data 16 | from tensorflow.examples.tutorials.mnist import input_data 17 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 18 | 19 | # Training Parameters 20 | learning_rate = 0.001 21 | num_steps = 200 22 | batch_size = 128 23 | display_step = 10 24 | 25 | # Network Parameters 26 | num_input = 784 # MNIST data input (img shape: 28*28) 27 | num_classes = 10 # MNIST total classes (0-9 digits) 28 | dropout = 0.75 # Dropout, probability to keep units 29 | 30 | # tf Graph input 31 | X = tf.placeholder(tf.float32, [None, num_input]) 32 | Y = tf.placeholder(tf.float32, [None, num_classes]) 33 | keep_prob = tf.placeholder(tf.float32) # dropout (keep probability) 34 | 35 | 36 | # Create some wrappers for simplicity 37 | def conv2d(x, W, b, strides=1): 38 | # Conv2D wrapper, with bias and relu activation 39 | x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') 40 | x = tf.nn.bias_add(x, b) 41 | return tf.nn.relu(x) 42 | 43 | 44 | def maxpool2d(x, k=2): 45 | # MaxPool2D wrapper 46 | return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], 47 | padding='SAME') 48 | 49 | 50 | # Create model 51 | def conv_net(x, weights, biases, dropout): 52 | # MNIST data input is a 1-D vector of 784 features (28*28 pixels) 53 | # Reshape to match picture format [Height x Width x Channel] 54 | # Tensor input become 4-D: [Batch Size, Height, Width, Channel] 55 | x = tf.reshape(x, shape=[-1, 28, 28, 1]) 56 | 57 | # Convolution Layer 58 | conv1 = conv2d(x, weights['wc1'], biases['bc1']) 59 | # Max Pooling (down-sampling) 60 | conv1 = maxpool2d(conv1, k=2) 61 | 62 | # Convolution Layer 63 | conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) 64 | # Max Pooling (down-sampling) 65 | conv2 = maxpool2d(conv2, k=2) 66 | 67 | # Fully connected layer 68 | # Reshape conv2 output to fit fully connected layer input 69 | fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]]) 70 | fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']) 71 | fc1 = tf.nn.relu(fc1) 72 | # Apply Dropout 73 | fc1 = tf.nn.dropout(fc1, dropout) 74 | 75 | # Output, class prediction 76 | out = tf.add(tf.matmul(fc1, weights['out']), biases['out']) 77 | return out 78 | 79 | # Store layers weight & bias 80 | weights = { 81 | # 5x5 conv, 1 input, 32 outputs 82 | 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), 83 | # 5x5 conv, 32 inputs, 64 outputs 84 | 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), 85 | # fully connected, 7*7*64 inputs, 1024 outputs 86 | 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), 87 | # 1024 inputs, 10 outputs (class prediction) 88 | 'out': tf.Variable(tf.random_normal([1024, num_classes])) 89 | } 90 | 91 | biases = { 92 | 'bc1': tf.Variable(tf.random_normal([32])), 93 | 'bc2': tf.Variable(tf.random_normal([64])), 94 | 'bd1': tf.Variable(tf.random_normal([1024])), 95 | 'out': tf.Variable(tf.random_normal([num_classes])) 96 | } 97 | 98 | # Construct model 99 | logits = conv_net(X, weights, biases, keep_prob) 100 | prediction = tf.nn.softmax(logits) 101 | 102 | # Define loss and optimizer 103 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 104 | logits=logits, labels=Y)) 105 | optimizer = 
tf.train.AdamOptimizer(learning_rate=learning_rate) 106 | train_op = optimizer.minimize(loss_op) 107 | 108 | 109 | # Evaluate model 110 | correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) 111 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 112 | 113 | # Initialize the variables (i.e. assign their default value) 114 | init = tf.global_variables_initializer() 115 | 116 | # Start training 117 | with tf.Session() as sess: 118 | 119 | # Run the initializer 120 | sess.run(init) 121 | 122 | for step in range(1, num_steps+1): 123 | batch_x, batch_y = mnist.train.next_batch(batch_size) 124 | # Run optimization op (backprop) 125 | sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.8}) 126 | if step % display_step == 0 or step == 1: 127 | # Calculate batch loss and accuracy 128 | loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, 129 | Y: batch_y, 130 | keep_prob: 1.0}) 131 | print("Step " + str(step) + ", Minibatch Loss= " + \ 132 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 133 | "{:.3f}".format(acc)) 134 | 135 | print("Optimization Finished!") 136 | 137 | # Calculate accuracy for 256 MNIST test images 138 | print("Testing Accuracy:", \ 139 | sess.run(accuracy, feed_dict={X: mnist.test.images[:256], 140 | Y: mnist.test.labels[:256], 141 | keep_prob: 1.0})) 142 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/3_NeuralNetworks/multilayer_perceptron.py: -------------------------------------------------------------------------------- 1 | """ Multilayer Perceptron. 2 | 3 | A Multilayer Perceptron (Neural Network) implementation example using 4 | TensorFlow library. This example is using the MNIST database of handwritten 5 | digits (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Links: 8 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | # ------------------------------------------------------------------ 15 | # 16 | # THIS EXAMPLE HAS BEEN RENAMED 'neural_network.py', FOR SIMPLICITY. 
17 | # 18 | # ------------------------------------------------------------------ 19 | 20 | 21 | from __future__ import print_function 22 | 23 | # Import MNIST data 24 | from tensorflow.examples.tutorials.mnist import input_data 25 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 26 | 27 | import tensorflow as tf 28 | 29 | # Parameters 30 | learning_rate = 0.001 31 | training_epochs = 15 32 | batch_size = 100 33 | display_step = 1 34 | 35 | # Network Parameters 36 | n_hidden_1 = 256 # 1st layer number of neurons 37 | n_hidden_2 = 256 # 2nd layer number of neurons 38 | n_input = 784 # MNIST data input (img shape: 28*28) 39 | n_classes = 10 # MNIST total classes (0-9 digits) 40 | 41 | # tf Graph input 42 | X = tf.placeholder("float", [None, n_input]) 43 | Y = tf.placeholder("float", [None, n_classes]) 44 | 45 | # Store layers weight & bias 46 | weights = { 47 | 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])), 48 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 49 | 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes])) 50 | } 51 | biases = { 52 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 53 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 54 | 'out': tf.Variable(tf.random_normal([n_classes])) 55 | } 56 | 57 | 58 | # Create model 59 | def multilayer_perceptron(x): 60 | # Hidden fully connected layer with 256 neurons 61 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 62 | # Hidden fully connected layer with 256 neurons 63 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 64 | # Output fully connected layer with a neuron for each class 65 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 66 | return out_layer 67 | 68 | # Construct model 69 | logits = multilayer_perceptron(X) 70 | 71 | # Define loss and optimizer 72 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 73 | logits=logits, labels=Y)) 74 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 75 | train_op = optimizer.minimize(loss_op) 76 | # Initializing the variables 77 | init = tf.global_variables_initializer() 78 | 79 | with tf.Session() as sess: 80 | sess.run(init) 81 | 82 | # Training cycle 83 | for epoch in range(training_epochs): 84 | avg_cost = 0. 85 | total_batch = int(mnist.train.num_examples/batch_size) 86 | # Loop over all batches 87 | for i in range(total_batch): 88 | batch_x, batch_y = mnist.train.next_batch(batch_size) 89 | # Run optimization op (backprop) and cost op (to get loss value) 90 | _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x, 91 | Y: batch_y}) 92 | # Compute average loss 93 | avg_cost += c / total_batch 94 | # Display logs per epoch step 95 | if epoch % display_step == 0: 96 | print("Epoch:", '%04d' % (epoch+1), "cost={:.9f}".format(avg_cost)) 97 | print("Optimization Finished!") 98 | 99 | # Test model 100 | pred = tf.nn.softmax(logits) # Apply softmax to logits 101 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1)) 102 | # Calculate accuracy 103 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 104 | print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels})) 105 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/3_NeuralNetworks/neural_network.py: -------------------------------------------------------------------------------- 1 | """ Neural Network. 
2 | 3 | A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron) 4 | implementation with TensorFlow. This example is using the MNIST database 5 | of handwritten digits (http://yann.lecun.com/exdb/mnist/). 6 | 7 | This example is using TensorFlow layers, see 'neural_network_raw' example for 8 | a raw implementation with variables. 9 | 10 | Links: 11 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 12 | 13 | Author: Aymeric Damien 14 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 15 | """ 16 | 17 | from __future__ import print_function 18 | 19 | # Import MNIST data 20 | from tensorflow.examples.tutorials.mnist import input_data 21 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 22 | 23 | import tensorflow as tf 24 | 25 | # Parameters 26 | learning_rate = 0.1 27 | num_steps = 1000 28 | batch_size = 128 29 | display_step = 100 30 | 31 | # Network Parameters 32 | n_hidden_1 = 256 # 1st layer number of neurons 33 | n_hidden_2 = 256 # 2nd layer number of neurons 34 | num_input = 784 # MNIST data input (img shape: 28*28) 35 | num_classes = 10 # MNIST total classes (0-9 digits) 36 | 37 | 38 | # Define the neural network 39 | def neural_net(x_dict): 40 | # TF Estimator input is a dict, in case of multiple inputs 41 | x = x_dict['images'] 42 | # Hidden fully connected layer with 256 neurons 43 | layer_1 = tf.layers.dense(x, n_hidden_1) 44 | # Hidden fully connected layer with 256 neurons 45 | layer_2 = tf.layers.dense(layer_1, n_hidden_2) 46 | # Output fully connected layer with a neuron for each class 47 | out_layer = tf.layers.dense(layer_2, num_classes) 48 | return out_layer 49 | 50 | 51 | # Define the model function (following TF Estimator Template) 52 | def model_fn(features, labels, mode): 53 | # Build the neural network 54 | logits = neural_net(features) 55 | 56 | # Predictions 57 | pred_classes = tf.argmax(logits, axis=1) 58 | pred_probas = tf.nn.softmax(logits) 59 | 60 | # If prediction mode, early return 61 | if mode == tf.estimator.ModeKeys.PREDICT: 62 | return tf.estimator.EstimatorSpec(mode, predictions=pred_classes) 63 | 64 | # Define loss and optimizer 65 | loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( 66 | logits=logits, labels=tf.cast(labels, dtype=tf.int32))) 67 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 68 | train_op = optimizer.minimize(loss_op, 69 | global_step=tf.train.get_global_step()) 70 | 71 | # Evaluate the accuracy of the model 72 | acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 73 | 74 | # TF Estimators requires to return a EstimatorSpec, that specify 75 | # the different ops for training, evaluating, ... 
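# Only part of the spec is required per mode: ModeKeys.PREDICT needs
# 'predictions', ModeKeys.EVAL needs 'loss' (metrics are optional), and
# ModeKeys.TRAIN needs both 'loss' and 'train_op'. Filling in everything,
# as below, lets one EstimatorSpec serve all three modes.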
76 | estim_specs = tf.estimator.EstimatorSpec( 77 | mode=mode, 78 | predictions=pred_classes, 79 | loss=loss_op, 80 | train_op=train_op, 81 | eval_metric_ops={'accuracy': acc_op}) 82 | 83 | return estim_specs 84 | 85 | # Build the Estimator 86 | model = tf.estimator.Estimator(model_fn) 87 | 88 | # Define the input function for training 89 | input_fn = tf.estimator.inputs.numpy_input_fn( 90 | x={'images': mnist.train.images}, y=mnist.train.labels, 91 | batch_size=batch_size, num_epochs=None, shuffle=True) 92 | # Train the Model 93 | model.train(input_fn, steps=num_steps) 94 | 95 | # Evaluate the Model 96 | # Define the input function for evaluating 97 | input_fn = tf.estimator.inputs.numpy_input_fn( 98 | x={'images': mnist.test.images}, y=mnist.test.labels, 99 | batch_size=batch_size, shuffle=False) 100 | # Use the Estimator 'evaluate' method 101 | e = model.evaluate(input_fn) 102 | 103 | print("Testing Accuracy:", e['accuracy']) 104 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/3_NeuralNetworks/neural_network_eager_api.py: -------------------------------------------------------------------------------- 1 | """ Neural Network with Eager API. 2 | 3 | A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron) 4 | implementation with TensorFlow's Eager API. This example is using the MNIST database 5 | of handwritten digits (http://yann.lecun.com/exdb/mnist/). 6 | 7 | This example is using TensorFlow layers, see 'neural_network_raw' example for 8 | a raw implementation with variables. 9 | 10 | Links: 11 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 12 | 13 | Author: Aymeric Damien 14 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 15 | """ 16 | from __future__ import print_function 17 | 18 | import tensorflow as tf 19 | 20 | # Set Eager API 21 | tf.enable_eager_execution() 22 | tfe = tf.contrib.eager 23 | 24 | # Import MNIST data 25 | from tensorflow.examples.tutorials.mnist import input_data 26 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 27 | 28 | # Parameters 29 | learning_rate = 0.001 30 | num_steps = 1000 31 | batch_size = 128 32 | display_step = 100 33 | 34 | # Network Parameters 35 | n_hidden_1 = 256 # 1st layer number of neurons 36 | n_hidden_2 = 256 # 2nd layer number of neurons 37 | num_input = 784 # MNIST data input (img shape: 28*28) 38 | num_classes = 10 # MNIST total classes (0-9 digits) 39 | 40 | # Using TF Dataset to split data into batches 41 | dataset = tf.data.Dataset.from_tensor_slices( 42 | (mnist.train.images, mnist.train.labels)) 43 | dataset = dataset.repeat().batch(batch_size).prefetch(batch_size) 44 | dataset_iter = tfe.Iterator(dataset) 45 | 46 | 47 | # Define the neural network. 
To use eager API and tf.layers API together, 48 | # we must instantiate a tfe.Network class as follow: 49 | class NeuralNet(tfe.Network): 50 | def __init__(self): 51 | # Define each layer 52 | super(NeuralNet, self).__init__() 53 | # Hidden fully connected layer with 256 neurons 54 | self.layer1 = self.track_layer( 55 | tf.layers.Dense(n_hidden_1, activation=tf.nn.relu)) 56 | # Hidden fully connected layer with 256 neurons 57 | self.layer2 = self.track_layer( 58 | tf.layers.Dense(n_hidden_2, activation=tf.nn.relu)) 59 | # Output fully connected layer with a neuron for each class 60 | self.out_layer = self.track_layer(tf.layers.Dense(num_classes)) 61 | 62 | def call(self, x): 63 | x = self.layer1(x) 64 | x = self.layer2(x) 65 | return self.out_layer(x) 66 | 67 | 68 | neural_net = NeuralNet() 69 | 70 | 71 | # Cross-Entropy loss function 72 | def loss_fn(inference_fn, inputs, labels): 73 | # Using sparse_softmax cross entropy 74 | return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( 75 | logits=inference_fn(inputs), labels=labels)) 76 | 77 | 78 | # Calculate accuracy 79 | def accuracy_fn(inference_fn, inputs, labels): 80 | prediction = tf.nn.softmax(inference_fn(inputs)) 81 | correct_pred = tf.equal(tf.argmax(prediction, 1), labels) 82 | return tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 83 | 84 | 85 | # SGD Optimizer 86 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 87 | # Compute gradients 88 | grad = tfe.implicit_gradients(loss_fn) 89 | 90 | # Training 91 | average_loss = 0. 92 | average_acc = 0. 93 | for step in range(num_steps): 94 | 95 | # Iterate through the dataset 96 | d = dataset_iter.next() 97 | 98 | # Images 99 | x_batch = d[0] 100 | # Labels 101 | y_batch = tf.cast(d[1], dtype=tf.int64) 102 | 103 | # Compute the batch loss 104 | batch_loss = loss_fn(neural_net, x_batch, y_batch) 105 | average_loss += batch_loss 106 | # Compute the batch accuracy 107 | batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch) 108 | average_acc += batch_accuracy 109 | 110 | if step == 0: 111 | # Display the initial cost, before optimizing 112 | print("Initial loss= {:.9f}".format(average_loss)) 113 | 114 | # Update the variables following gradients info 115 | optimizer.apply_gradients(grad(neural_net, x_batch, y_batch)) 116 | 117 | # Display info 118 | if (step + 1) % display_step == 0 or step == 0: 119 | if step > 0: 120 | average_loss /= display_step 121 | average_acc /= display_step 122 | print("Step:", '%04d' % (step + 1), " loss=", 123 | "{:.9f}".format(average_loss), " accuracy=", 124 | "{:.4f}".format(average_acc)) 125 | average_loss = 0. 126 | average_acc = 0. 127 | 128 | # Evaluate model on the test image set 129 | testX = mnist.test.images 130 | testY = mnist.test.labels 131 | 132 | test_acc = accuracy_fn(neural_net, testX, testY) 133 | print("Testset Accuracy: {:.4f}".format(test_acc)) 134 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/3_NeuralNetworks/neural_network_raw.py: -------------------------------------------------------------------------------- 1 | """ Neural Network. 2 | 3 | A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron) 4 | implementation with TensorFlow. This example is using the MNIST database 5 | of handwritten digits (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Links: 8 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 
9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | from __future__ import print_function 15 | 16 | # Import MNIST data 17 | from tensorflow.examples.tutorials.mnist import input_data 18 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 19 | 20 | import tensorflow as tf 21 | 22 | # Parameters 23 | learning_rate = 0.1 24 | num_steps = 500 25 | batch_size = 128 26 | display_step = 100 27 | 28 | # Network Parameters 29 | n_hidden_1 = 256 # 1st layer number of neurons 30 | n_hidden_2 = 256 # 2nd layer number of neurons 31 | num_input = 784 # MNIST data input (img shape: 28*28) 32 | num_classes = 10 # MNIST total classes (0-9 digits) 33 | 34 | # tf Graph input 35 | X = tf.placeholder("float", [None, num_input]) 36 | Y = tf.placeholder("float", [None, num_classes]) 37 | 38 | # Store layers weight & bias 39 | weights = { 40 | 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])), 41 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 42 | 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes])) 43 | } 44 | biases = { 45 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 46 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 47 | 'out': tf.Variable(tf.random_normal([num_classes])) 48 | } 49 | 50 | 51 | # Create model 52 | def neural_net(x): 53 | # Hidden fully connected layer with 256 neurons 54 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 55 | # Hidden fully connected layer with 256 neurons 56 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 57 | # Output fully connected layer with a neuron for each class 58 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 59 | return out_layer 60 | 61 | # Construct model 62 | logits = neural_net(X) 63 | prediction = tf.nn.softmax(logits) 64 | 65 | # Define loss and optimizer 66 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 67 | logits=logits, labels=Y)) 68 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 69 | train_op = optimizer.minimize(loss_op) 70 | 71 | # Evaluate model 72 | correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) 73 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 74 | 75 | # Initialize the variables (i.e. assign their default value) 76 | init = tf.global_variables_initializer() 77 | 78 | # Start training 79 | with tf.Session() as sess: 80 | 81 | # Run the initializer 82 | sess.run(init) 83 | 84 | for step in range(1, num_steps+1): 85 | batch_x, batch_y = mnist.train.next_batch(batch_size) 86 | # Run optimization op (backprop) 87 | sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) 88 | if step % display_step == 0 or step == 1: 89 | # Calculate batch loss and accuracy 90 | loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, 91 | Y: batch_y}) 92 | print("Step " + str(step) + ", Minibatch Loss= " + \ 93 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 94 | "{:.3f}".format(acc)) 95 | 96 | print("Optimization Finished!") 97 | 98 | # Calculate accuracy for MNIST test images 99 | print("Testing Accuracy:", \ 100 | sess.run(accuracy, feed_dict={X: mnist.test.images, 101 | Y: mnist.test.labels})) 102 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/3_NeuralNetworks/recurrent_network.py: -------------------------------------------------------------------------------- 1 | """ Recurrent Neural Network. 
2 | 3 | A Recurrent Neural Network (LSTM) implementation example using TensorFlow library. 4 | This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Links: 7 | [Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf) 8 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | from __future__ import print_function 15 | 16 | import tensorflow as tf 17 | from tensorflow.contrib import rnn 18 | 19 | # Import MNIST data 20 | from tensorflow.examples.tutorials.mnist import input_data 21 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 22 | 23 | ''' 24 | To classify images using a recurrent neural network, we consider every image 25 | row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then 26 | handle 28 sequences of 28 steps for every sample. 27 | ''' 28 | 29 | # Training Parameters 30 | learning_rate = 0.001 31 | training_steps = 10000 32 | batch_size = 128 33 | display_step = 200 34 | 35 | # Network Parameters 36 | num_input = 28 # MNIST data input (img shape: 28*28) 37 | timesteps = 28 # timesteps 38 | num_hidden = 128 # hidden layer num of features 39 | num_classes = 10 # MNIST total classes (0-9 digits) 40 | 41 | # tf Graph input 42 | X = tf.placeholder("float", [None, timesteps, num_input]) 43 | Y = tf.placeholder("float", [None, num_classes]) 44 | 45 | # Define weights 46 | weights = { 47 | 'out': tf.Variable(tf.random_normal([num_hidden, num_classes])) 48 | } 49 | biases = { 50 | 'out': tf.Variable(tf.random_normal([num_classes])) 51 | } 52 | 53 | 54 | def RNN(x, weights, biases): 55 | 56 | # Prepare data shape to match `rnn` function requirements 57 | # Current data input shape: (batch_size, timesteps, n_input) 58 | # Required shape: 'timesteps' tensors list of shape (batch_size, n_input) 59 | 60 | # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input) 61 | x = tf.unstack(x, timesteps, 1) 62 | 63 | # Define a lstm cell with tensorflow 64 | lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0) 65 | 66 | # Get lstm cell output 67 | outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32) 68 | 69 | # Linear activation, using rnn inner loop last output 70 | return tf.matmul(outputs[-1], weights['out']) + biases['out'] 71 | 72 | logits = RNN(X, weights, biases) 73 | prediction = tf.nn.softmax(logits) 74 | 75 | # Define loss and optimizer 76 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 77 | logits=logits, labels=Y)) 78 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 79 | train_op = optimizer.minimize(loss_op) 80 | 81 | # Evaluate model (with test logits, for dropout to be disabled) 82 | correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) 83 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 84 | 85 | # Initialize the variables (i.e. 
assign their default value) 86 | init = tf.global_variables_initializer() 87 | 88 | # Start training 89 | with tf.Session() as sess: 90 | 91 | # Run the initializer 92 | sess.run(init) 93 | 94 | for step in range(1, training_steps+1): 95 | batch_x, batch_y = mnist.train.next_batch(batch_size) 96 | # Reshape data to get 28 seq of 28 elements 97 | batch_x = batch_x.reshape((batch_size, timesteps, num_input)) 98 | # Run optimization op (backprop) 99 | sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) 100 | if step % display_step == 0 or step == 1: 101 | # Calculate batch loss and accuracy 102 | loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, 103 | Y: batch_y}) 104 | print("Step " + str(step) + ", Minibatch Loss= " + \ 105 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 106 | "{:.3f}".format(acc)) 107 | 108 | print("Optimization Finished!") 109 | 110 | # Calculate accuracy for 128 mnist test images 111 | test_len = 128 112 | test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input)) 113 | test_label = mnist.test.labels[:test_len] 114 | print("Testing Accuracy:", \ 115 | sess.run(accuracy, feed_dict={X: test_data, Y: test_label})) 116 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/4_Utils/save_restore_model.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Save and Restore a model using TensorFlow. 3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | # Import MNIST data 13 | from tensorflow.examples.tutorials.mnist import input_data 14 | mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 15 | 16 | import tensorflow as tf 17 | 18 | # Parameters 19 | learning_rate = 0.001 20 | batch_size = 100 21 | display_step = 1 22 | model_path = "/tmp/model.ckpt" 23 | 24 | # Network Parameters 25 | n_hidden_1 = 256 # 1st layer number of features 26 | n_hidden_2 = 256 # 2nd layer number of features 27 | n_input = 784 # MNIST data input (img shape: 28*28) 28 | n_classes = 10 # MNIST total classes (0-9 digits) 29 | 30 | # tf Graph input 31 | x = tf.placeholder("float", [None, n_input]) 32 | y = tf.placeholder("float", [None, n_classes]) 33 | 34 | 35 | # Create model 36 | def multilayer_perceptron(x, weights, biases): 37 | # Hidden layer with RELU activation 38 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 39 | layer_1 = tf.nn.relu(layer_1) 40 | # Hidden layer with RELU activation 41 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 42 | layer_2 = tf.nn.relu(layer_2) 43 | # Output layer with linear activation 44 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 45 | return out_layer 46 | 47 | # Store layers weight & bias 48 | weights = { 49 | 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])), 50 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 51 | 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes])) 52 | } 53 | biases = { 54 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 55 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 56 | 'out': tf.Variable(tf.random_normal([n_classes])) 57 | } 58 | 59 | # Construct model 60 | pred = multilayer_perceptron(x, weights, biases) 61 | 62 | # Define loss and optimizer 63 | cost = 
tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) 64 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) 65 | 66 | # Initialize the variables (i.e. assign their default value) 67 | init = tf.global_variables_initializer() 68 | 69 | # 'Saver' op to save and restore all the variables 70 | saver = tf.train.Saver() 71 | 72 | # Running first session 73 | print("Starting 1st session...") 74 | with tf.Session() as sess: 75 | 76 | # Run the initializer 77 | sess.run(init) 78 | 79 | # Training cycle 80 | for epoch in range(3): 81 | avg_cost = 0. 82 | total_batch = int(mnist.train.num_examples/batch_size) 83 | # Loop over all batches 84 | for i in range(total_batch): 85 | batch_x, batch_y = mnist.train.next_batch(batch_size) 86 | # Run optimization op (backprop) and cost op (to get loss value) 87 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, 88 | y: batch_y}) 89 | # Compute average loss 90 | avg_cost += c / total_batch 91 | # Display logs per epoch step 92 | if epoch % display_step == 0: 93 | print("Epoch:", '%04d' % (epoch+1), "cost=", \ 94 | "{:.9f}".format(avg_cost)) 95 | print("First Optimization Finished!") 96 | 97 | # Test model 98 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 99 | # Calculate accuracy 100 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 101 | print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})) 102 | 103 | # Save model weights to disk 104 | save_path = saver.save(sess, model_path) 105 | print("Model saved in file: %s" % save_path) 106 | 107 | # Running a new session 108 | print("Starting 2nd session...") 109 | with tf.Session() as sess: 110 | # Initialize variables 111 | sess.run(init) 112 | 113 | # Restore model weights from previously saved model 114 | saver.restore(sess, model_path) 115 | print("Model restored from file: %s" % save_path) 116 | 117 | # Resume training 118 | for epoch in range(7): 119 | avg_cost = 0. 120 | total_batch = int(mnist.train.num_examples / batch_size) 121 | # Loop over all batches 122 | for i in range(total_batch): 123 | batch_x, batch_y = mnist.train.next_batch(batch_size) 124 | # Run optimization op (backprop) and cost op (to get loss value) 125 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, 126 | y: batch_y}) 127 | # Compute average loss 128 | avg_cost += c / total_batch 129 | # Display logs per epoch step 130 | if epoch % display_step == 0: 131 | print("Epoch:", '%04d' % (epoch + 1), "cost=", \ 132 | "{:.9f}".format(avg_cost)) 133 | print("Second Optimization Finished!") 134 | 135 | # Test model 136 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 137 | # Calculate accuracy 138 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 139 | print("Accuracy:", accuracy.eval( 140 | {x: mnist.test.images, y: mnist.test.labels})) 141 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/4_Utils/tensorboard_basic.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Graph and Loss visualization using Tensorboard. 
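The model, loss, optimizer and accuracy ops are wrapped in tf.name_scope blocks
and logged with tf.summary scalars, so TensorBoard can display both the graph
and the training curves.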
3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | ''' 9 | 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.01 20 | training_epochs = 25 21 | batch_size = 100 22 | display_epoch = 1 23 | logs_path = '/tmp/tensorflow_logs/example/' 24 | 25 | # tf Graph Input 26 | # mnist data image of shape 28*28=784 27 | x = tf.placeholder(tf.float32, [None, 784], name='InputData') 28 | # 0-9 digits recognition => 10 classes 29 | y = tf.placeholder(tf.float32, [None, 10], name='LabelData') 30 | 31 | # Set model weights 32 | W = tf.Variable(tf.zeros([784, 10]), name='Weights') 33 | b = tf.Variable(tf.zeros([10]), name='Bias') 34 | 35 | # Construct model and encapsulating all ops into scopes, making 36 | # Tensorboard's Graph visualization more convenient 37 | with tf.name_scope('Model'): 38 | # Model 39 | pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax 40 | with tf.name_scope('Loss'): 41 | # Minimize error using cross entropy 42 | cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) 43 | with tf.name_scope('SGD'): 44 | # Gradient Descent 45 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 46 | with tf.name_scope('Accuracy'): 47 | # Accuracy 48 | acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 49 | acc = tf.reduce_mean(tf.cast(acc, tf.float32)) 50 | 51 | # Initialize the variables (i.e. assign their default value) 52 | init = tf.global_variables_initializer() 53 | 54 | # Create a summary to monitor cost tensor 55 | tf.summary.scalar("loss", cost) 56 | # Create a summary to monitor accuracy tensor 57 | tf.summary.scalar("accuracy", acc) 58 | # Merge all summaries into a single op 59 | merged_summary_op = tf.summary.merge_all() 60 | 61 | # Start training 62 | with tf.Session() as sess: 63 | 64 | # Run the initializer 65 | sess.run(init) 66 | 67 | # op to write logs to Tensorboard 68 | summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph()) 69 | 70 | # Training cycle 71 | for epoch in range(training_epochs): 72 | avg_cost = 0. 
73 | total_batch = int(mnist.train.num_examples/batch_size) 74 | # Loop over all batches 75 | for i in range(total_batch): 76 | batch_xs, batch_ys = mnist.train.next_batch(batch_size) 77 | # Run optimization op (backprop), cost op (to get loss value) 78 | # and summary nodes 79 | _, c, summary = sess.run([optimizer, cost, merged_summary_op], 80 | feed_dict={x: batch_xs, y: batch_ys}) 81 | # Write logs at every iteration 82 | summary_writer.add_summary(summary, epoch * total_batch + i) 83 | # Compute average loss 84 | avg_cost += c / total_batch 85 | # Display logs per epoch step 86 | if (epoch+1) % display_epoch == 0: 87 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)) 88 | 89 | print("Optimization Finished!") 90 | 91 | # Test model 92 | # Calculate accuracy 93 | print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels})) 94 | 95 | print("Run the command line:\n" \ 96 | "--> tensorboard --logdir=/tmp/tensorflow_logs " \ 97 | "\nThen open http://0.0.0.0:6006/ into your web browser") 98 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/5_DataManagement/tensorflow_dataset_api.py: -------------------------------------------------------------------------------- 1 | """ TensorFlow Dataset API. 2 | 3 | In this example, we will show how to load numpy array data into the new 4 | TensorFlow 'Dataset' API. The Dataset API implements an optimized data pipeline 5 | with queues, that make data processing and training faster (especially on GPU). 6 | 7 | Author: Aymeric Damien 8 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 9 | """ 10 | from __future__ import print_function 11 | 12 | import tensorflow as tf 13 | 14 | # Import MNIST data (Numpy format) 15 | from tensorflow.examples.tutorials.mnist import input_data 16 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 17 | 18 | # Parameters 19 | learning_rate = 0.001 20 | num_steps = 2000 21 | batch_size = 128 22 | display_step = 100 23 | 24 | # Network Parameters 25 | n_input = 784 # MNIST data input (img shape: 28*28) 26 | n_classes = 10 # MNIST total classes (0-9 digits) 27 | dropout = 0.75 # Dropout, probability to keep units 28 | 29 | sess = tf.Session() 30 | 31 | # Create a dataset tensor from the images and the labels 32 | dataset = tf.data.Dataset.from_tensor_slices( 33 | (mnist.train.images, mnist.train.labels)) 34 | # Automatically refill the data queue when empty 35 | dataset = dataset.repeat() 36 | # Create batches of data 37 | dataset = dataset.batch(batch_size) 38 | # Prefetch data for faster consumption 39 | dataset = dataset.prefetch(batch_size) 40 | 41 | # Create an iterator over the dataset 42 | iterator = dataset.make_initializable_iterator() 43 | # Initialize the iterator 44 | sess.run(iterator.initializer) 45 | 46 | # Neural Net Input (images, labels) 47 | X, Y = iterator.get_next() 48 | 49 | 50 | # ----------------------------------------------- 51 | # THIS IS A CLASSIC CNN (see examples, section 3) 52 | # ----------------------------------------------- 53 | # Note that a few elements have changed (usage of sess run). 
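# Concretely, a minimal sketch of what changes: X and Y above come straight from
# the Dataset iterator, so any op that depends on them consumes a fresh batch by
# itself, and the feed_dict used in the other examples disappears, e.g.
#
#     sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})   # placeholder-based examples
#     sess.run(train_op)                                        # Dataset API (as below)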
54 | 55 | # Create model 56 | def conv_net(x, n_classes, dropout, reuse, is_training): 57 | # Define a scope for reusing the variables 58 | with tf.variable_scope('ConvNet', reuse=reuse): 59 | # MNIST data input is a 1-D vector of 784 features (28*28 pixels) 60 | # Reshape to match picture format [Height x Width x Channel] 61 | # Tensor input become 4-D: [Batch Size, Height, Width, Channel] 62 | x = tf.reshape(x, shape=[-1, 28, 28, 1]) 63 | 64 | # Convolution Layer with 32 filters and a kernel size of 5 65 | conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu) 66 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 67 | conv1 = tf.layers.max_pooling2d(conv1, 2, 2) 68 | 69 | # Convolution Layer with 32 filters and a kernel size of 5 70 | conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu) 71 | # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 72 | conv2 = tf.layers.max_pooling2d(conv2, 2, 2) 73 | 74 | # Flatten the data to a 1-D vector for the fully connected layer 75 | fc1 = tf.contrib.layers.flatten(conv2) 76 | 77 | # Fully connected layer (in contrib folder for now) 78 | fc1 = tf.layers.dense(fc1, 1024) 79 | # Apply Dropout (if is_training is False, dropout is not applied) 80 | fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training) 81 | 82 | # Output layer, class prediction 83 | out = tf.layers.dense(fc1, n_classes) 84 | # Because 'softmax_cross_entropy_with_logits' already apply softmax, 85 | # we only apply softmax to testing network 86 | out = tf.nn.softmax(out) if not is_training else out 87 | 88 | return out 89 | 90 | 91 | # Because Dropout have different behavior at training and prediction time, we 92 | # need to create 2 distinct computation graphs that share the same weights. 93 | 94 | # Create a graph for training 95 | logits_train = conv_net(X, n_classes, dropout, reuse=False, is_training=True) 96 | # Create another graph for testing that reuse the same weights, but has 97 | # different behavior for 'dropout' (not applied). 98 | logits_test = conv_net(X, n_classes, dropout, reuse=True, is_training=False) 99 | 100 | # Define loss and optimizer (with train logits, for dropout to take effect) 101 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 102 | logits=logits_train, labels=Y)) 103 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 104 | train_op = optimizer.minimize(loss_op) 105 | 106 | # Evaluate model (with test logits, for dropout to be disabled) 107 | correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(Y, 1)) 108 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 109 | 110 | # Initialize the variables (i.e. 
assign their default value) 111 | init = tf.global_variables_initializer() 112 | 113 | # Run the initializer 114 | sess.run(init) 115 | 116 | # Training cycle 117 | for step in range(1, num_steps + 1): 118 | 119 | # Run optimization 120 | sess.run(train_op) 121 | 122 | if step % display_step == 0 or step == 1: 123 | # Calculate batch loss and accuracy 124 | # (note that this consume a new batch of data) 125 | loss, acc = sess.run([loss_op, accuracy]) 126 | print("Step " + str(step) + ", Minibatch Loss= " + \ 127 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 128 | "{:.3f}".format(acc)) 129 | 130 | print("Optimization Finished!") 131 | -------------------------------------------------------------------------------- /tensorflow_v1/examples/6_MultiGPU/multigpu_basics.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | ''' 3 | Basic Multi GPU computation example using TensorFlow library. 4 | 5 | Author: Aymeric Damien 6 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 7 | ''' 8 | 9 | ''' 10 | This tutorial requires your machine to have 2 GPUs 11 | "/cpu:0": The CPU of your machine. 12 | "/gpu:0": The first GPU of your machine 13 | "/gpu:1": The second GPU of your machine 14 | ''' 15 | 16 | 17 | 18 | import numpy as np 19 | import tensorflow as tf 20 | import datetime 21 | 22 | # Processing Units logs 23 | log_device_placement = True 24 | 25 | # Num of multiplications to perform 26 | n = 10 27 | 28 | ''' 29 | Example: compute A^n + B^n on 2 GPUs 30 | Results on 8 cores with 2 GTX-980: 31 | * Single GPU computation time: 0:00:11.277449 32 | * Multi GPU computation time: 0:00:07.131701 33 | ''' 34 | # Create random large matrix 35 | A = np.random.rand(10000, 10000).astype('float32') 36 | B = np.random.rand(10000, 10000).astype('float32') 37 | 38 | # Create a graph to store results 39 | c1 = [] 40 | c2 = [] 41 | 42 | def matpow(M, n): 43 | if n < 1: #Abstract cases where n < 1 44 | return M 45 | else: 46 | return tf.matmul(M, matpow(M, n-1)) 47 | 48 | ''' 49 | Single GPU computing 50 | ''' 51 | with tf.device('/gpu:0'): 52 | a = tf.placeholder(tf.float32, [10000, 10000]) 53 | b = tf.placeholder(tf.float32, [10000, 10000]) 54 | # Compute A^n and B^n and store results in c1 55 | c1.append(matpow(a, n)) 56 | c1.append(matpow(b, n)) 57 | 58 | with tf.device('/cpu:0'): 59 | sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n 60 | 61 | t1_1 = datetime.datetime.now() 62 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess: 63 | # Run the op. 64 | sess.run(sum, {a:A, b:B}) 65 | t2_1 = datetime.datetime.now() 66 | 67 | 68 | ''' 69 | Multi GPU computing 70 | ''' 71 | # GPU:0 computes A^n 72 | with tf.device('/gpu:0'): 73 | # Compute A^n and store result in c2 74 | a = tf.placeholder(tf.float32, [10000, 10000]) 75 | c2.append(matpow(a, n)) 76 | 77 | # GPU:1 computes B^n 78 | with tf.device('/gpu:1'): 79 | # Compute B^n and store result in c2 80 | b = tf.placeholder(tf.float32, [10000, 10000]) 81 | c2.append(matpow(b, n)) 82 | 83 | with tf.device('/cpu:0'): 84 | sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n 85 | 86 | t1_2 = datetime.datetime.now() 87 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess: 88 | # Run the op. 
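# (The two matpow() chains were pinned to different GPUs above, so they can
#  execute concurrently; '/cpu:0' then adds the two partial results.)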
89 | sess.run(sum, {a:A, b:B}) 90 | t2_2 = datetime.datetime.now() 91 | 92 | 93 | print("Single GPU computation time: " + str(t2_1-t1_1)) 94 | print("Multi GPU computation time: " + str(t2_2-t1_2)) 95 | -------------------------------------------------------------------------------- /tensorflow_v1/notebooks/0_Prerequisite/ml_introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Machine Learning\n", 8 | "\n", 9 | "Prior to start browsing the examples, it may be useful that you get familiar with machine learning, as TensorFlow is mostly used for machine learning tasks (especially Neural Networks). You can find below a list of useful links, that can give you the basic knowledge required for this TensorFlow Tutorial.\n", 10 | "\n", 11 | "## Machine Learning\n", 12 | "\n", 13 | "- [An Introduction to Machine Learning Theory and Its Applications: A Visual Tutorial with Examples](https://www.toptal.com/machine-learning/machine-learning-theory-an-introductory-primer)\n", 14 | "- [A Gentle Guide to Machine Learning](https://blog.monkeylearn.com/a-gentle-guide-to-machine-learning/)\n", 15 | "- [A Visual Introduction to Machine Learning](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/)\n", 16 | "- [Introduction to Machine Learning](http://alex.smola.org/drafts/thebook.pdf)\n", 17 | "\n", 18 | "## Deep Learning & Neural Networks\n", 19 | "\n", 20 | "- [An Introduction to Neural Networks](http://www.cs.stir.ac.uk/~lss/NNIntro/InvSlides.html)\n", 21 | "- [An Introduction to Image Recognition with Deep Learning](https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721)\n", 22 | "- [Neural Networks and Deep Learning](http://neuralnetworksanddeeplearning.com/index.html)\n", 23 | "\n" 24 | ] 25 | } 26 | ], 27 | "metadata": { 28 | "kernelspec": { 29 | "display_name": "IPython (Python 2.7)", 30 | "language": "python", 31 | "name": "python2" 32 | }, 33 | "language_info": { 34 | "codemirror_mode": { 35 | "name": "ipython", 36 | "version": 2 37 | }, 38 | "file_extension": ".py", 39 | "mimetype": "text/x-python", 40 | "name": "python", 41 | "nbconvert_exporter": "python", 42 | "pygments_lexer": "ipython2", 43 | "version": "2.7.11" 44 | } 45 | }, 46 | "nbformat": 4, 47 | "nbformat_minor": 0 48 | } 49 | -------------------------------------------------------------------------------- /tensorflow_v1/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "\n", 8 | "# MNIST Dataset Introduction\n", 9 | "\n", 10 | "Most examples are using MNIST dataset of handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. 
For simplicity, each image has been flatten and converted to a 1-D numpy array of 784 features (28*28).\n", 11 | "\n", 12 | "## Overview\n", 13 | "\n", 14 | "![MNIST Digits](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n", 15 | "\n", 16 | "## Usage\n", 17 | "In our examples, we are using TensorFlow [input_data.py](https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/mnist/input_data.py) script to load that dataset.\n", 18 | "It is quite useful for managing our data, and handle:\n", 19 | "\n", 20 | "- Dataset downloading\n", 21 | "\n", 22 | "- Loading the entire dataset into numpy array: \n", 23 | "\n", 24 | "\n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": { 31 | "collapsed": true 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "# Import MNIST\n", 36 | "from tensorflow.examples.tutorials.mnist import input_data\n", 37 | "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n", 38 | "\n", 39 | "# Load data\n", 40 | "X_train = mnist.train.images\n", 41 | "Y_train = mnist.train.labels\n", 42 | "X_test = mnist.test.images\n", 43 | "Y_test = mnist.test.labels" 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "- A `next_batch` function that can iterate over the whole dataset and return only the desired fraction of the dataset samples (in order to save memory and avoid to load the entire dataset)." 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "metadata": { 57 | "collapsed": true 58 | }, 59 | "outputs": [], 60 | "source": [ 61 | "# Get the next 64 images array and labels\n", 62 | "batch_X, batch_Y = mnist.train.next_batch(64)" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "Link: http://yann.lecun.com/exdb/mnist/" 70 | ] 71 | } 72 | ], 73 | "metadata": { 74 | "kernelspec": { 75 | "display_name": "Python 2", 76 | "language": "python", 77 | "name": "python2" 78 | }, 79 | "language_info": { 80 | "codemirror_mode": { 81 | "name": "ipython", 82 | "version": 2 83 | }, 84 | "file_extension": ".py", 85 | "mimetype": "text/x-python", 86 | "name": "python", 87 | "nbconvert_exporter": "python", 88 | "pygments_lexer": "ipython2", 89 | "version": "2.7.13" 90 | } 91 | }, 92 | "nbformat": 4, 93 | "nbformat_minor": 0 94 | } 95 | -------------------------------------------------------------------------------- /tensorflow_v1/notebooks/1_Introduction/helloworld.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": false 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import tensorflow as tf" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "metadata": { 18 | "collapsed": true 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "# Simple hello world using TensorFlow\n", 23 | "\n", 24 | "# Create a Constant op\n", 25 | "# The op is added as a node to the default graph.\n", 26 | "#\n", 27 | "# The value returned by the constructor represents the output\n", 28 | "# of the Constant op.\n", 29 | "\n", 30 | "hello = tf.constant('Hello, TensorFlow!')" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 3, 36 | "metadata": { 37 | "collapsed": true 38 | }, 39 | "outputs": [], 40 | "source": [ 41 | "# Start tf session\n", 42 | "sess = tf.Session()" 43 | ] 44 | }, 45 | { 46 | "cell_type": 
"code", 47 | "execution_count": 4, 48 | "metadata": { 49 | "collapsed": false 50 | }, 51 | "outputs": [ 52 | { 53 | "name": "stdout", 54 | "output_type": "stream", 55 | "text": [ 56 | "Hello, TensorFlow!\n" 57 | ] 58 | } 59 | ], 60 | "source": [ 61 | "# Run graph\n", 62 | "print(sess.run(hello))" 63 | ] 64 | } 65 | ], 66 | "metadata": { 67 | "kernelspec": { 68 | "display_name": "IPython (Python 2.7)", 69 | "language": "python", 70 | "name": "python2" 71 | }, 72 | "language_info": { 73 | "codemirror_mode": { 74 | "name": "ipython", 75 | "version": 2.0 76 | }, 77 | "file_extension": ".py", 78 | "mimetype": "text/x-python", 79 | "name": "python", 80 | "nbconvert_exporter": "python", 81 | "pygments_lexer": "ipython2", 82 | "version": "2.7.8" 83 | } 84 | }, 85 | "nbformat": 4, 86 | "nbformat_minor": 0 87 | } 88 | -------------------------------------------------------------------------------- /tensorflow_v1/notebooks/6_MultiGPU/multigpu_basics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true 7 | }, 8 | "source": [ 9 | "# Multi-GPU Basics\n", 10 | "\n", 11 | "Basic Multi-GPU computation example using TensorFlow library.\n", 12 | "\n", 13 | "This tutorial requires your machine to have 2 GPUs\n", 14 | "\"/cpu:0\": The CPU of your machine.\n", 15 | "\"/gpu:0\": The first GPU of your machine\n", 16 | "\"/gpu:1\": The second GPU of your machine\n", 17 | "For this example, we are using 2 GTX-980\n", 18 | "\n", 19 | "- Author: Aymeric Damien\n", 20 | "- Project: https://github.com/aymericdamien/TensorFlow-Examples/" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 2, 26 | "metadata": { 27 | "collapsed": true 28 | }, 29 | "outputs": [], 30 | "source": [ 31 | "import numpy as np\n", 32 | "import tensorflow as tf\n", 33 | "import datetime" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 3, 39 | "metadata": { 40 | "collapsed": true 41 | }, 42 | "outputs": [], 43 | "source": [ 44 | "#Processing Units logs\n", 45 | "log_device_placement = True\n", 46 | "\n", 47 | "#num of multiplications to perform\n", 48 | "n = 10" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": { 55 | "collapsed": false 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "# Example: compute A^n + B^n on 2 GPUs\n", 60 | "\n", 61 | "# Create random large matrix\n", 62 | "A = np.random.rand(1e4, 1e4).astype('float32')\n", 63 | "B = np.random.rand(1e4, 1e4).astype('float32')\n", 64 | "\n", 65 | "# Creates a graph to store results\n", 66 | "c1 = []\n", 67 | "c2 = []\n", 68 | "\n", 69 | "# Define matrix power\n", 70 | "def matpow(M, n):\n", 71 | " if n < 1: #Abstract cases where n < 1\n", 72 | " return M\n", 73 | " else:\n", 74 | " return tf.matmul(M, matpow(M, n-1))" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 6, 80 | "metadata": { 81 | "collapsed": true 82 | }, 83 | "outputs": [], 84 | "source": [ 85 | "# Single GPU computing\n", 86 | "\n", 87 | "with tf.device('/gpu:0'):\n", 88 | " a = tf.constant(A)\n", 89 | " b = tf.constant(B)\n", 90 | " #compute A^n and B^n and store results in c1\n", 91 | " c1.append(matpow(a, n))\n", 92 | " c1.append(matpow(b, n))\n", 93 | "\n", 94 | "with tf.device('/cpu:0'):\n", 95 | " sum = tf.add_n(c1) #Addition of all elements in c1, i.e. 
A^n + B^n\n", 96 | "\n", 97 | "t1_1 = datetime.datetime.now()\n", 98 | "with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:\n", 99 | " # Runs the op.\n", 100 | " sess.run(sum)\n", 101 | "t2_1 = datetime.datetime.now()" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 7, 107 | "metadata": { 108 | "collapsed": true 109 | }, 110 | "outputs": [], 111 | "source": [ 112 | "# Multi GPU computing\n", 113 | "# GPU:0 computes A^n\n", 114 | "with tf.device('/gpu:0'):\n", 115 | " #compute A^n and store result in c2\n", 116 | " a = tf.constant(A)\n", 117 | " c2.append(matpow(a, n))\n", 118 | "\n", 119 | "#GPU:1 computes B^n\n", 120 | "with tf.device('/gpu:1'):\n", 121 | " #compute B^n and store result in c2\n", 122 | " b = tf.constant(B)\n", 123 | " c2.append(matpow(b, n))\n", 124 | "\n", 125 | "with tf.device('/cpu:0'):\n", 126 | " sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n\n", 127 | "\n", 128 | "t1_2 = datetime.datetime.now()\n", 129 | "with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:\n", 130 | " # Runs the op.\n", 131 | " sess.run(sum)\n", 132 | "t2_2 = datetime.datetime.now()" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 8, 138 | "metadata": { 139 | "collapsed": false 140 | }, 141 | "outputs": [ 142 | { 143 | "name": "stdout", 144 | "output_type": "stream", 145 | "text": [ 146 | "Single GPU computation time: 0:00:11.833497\n", 147 | "Multi GPU computation time: 0:00:07.085913\n" 148 | ] 149 | } 150 | ], 151 | "source": [ 152 | "print \"Single GPU computation time: \" + str(t2_1-t1_1)\n", 153 | "print \"Multi GPU computation time: \" + str(t2_2-t1_2)" 154 | ] 155 | } 156 | ], 157 | "metadata": { 158 | "anaconda-cloud": {}, 159 | "kernelspec": { 160 | "display_name": "Python [default]", 161 | "language": "python", 162 | "name": "python2" 163 | }, 164 | "language_info": { 165 | "codemirror_mode": { 166 | "name": "ipython", 167 | "version": 2 168 | }, 169 | "file_extension": ".py", 170 | "mimetype": "text/x-python", 171 | "name": "python", 172 | "nbconvert_exporter": "python", 173 | "pygments_lexer": "ipython2", 174 | "version": "2.7.12" 175 | } 176 | }, 177 | "nbformat": 4, 178 | "nbformat_minor": 0 179 | } 180 | -------------------------------------------------------------------------------- /tensorflow_v2/notebooks/0_Prerequisite/ml_introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# Machine Learning\n", 10 | "\n", 11 | "Prior to start browsing the examples, it may be useful that you get familiar with machine learning, as TensorFlow is mostly used for machine learning tasks (especially Neural Networks). 
You can find below a list of useful links, that can give you the basic knowledge required for this TensorFlow Tutorial.\n", 12 | "\n", 13 | "## Machine Learning\n", 14 | "\n", 15 | "- [An Introduction to Machine Learning Theory and Its Applications: A Visual Tutorial with Examples](https://www.toptal.com/machine-learning/machine-learning-theory-an-introductory-primer)\n", 16 | "- [A Gentle Guide to Machine Learning](https://monkeylearn.com/blog/gentle-guide-to-machine-learning/)\n", 17 | "- [A Visual Introduction to Machine Learning](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/)\n", 18 | "- [Introduction to Machine Learning](http://alex.smola.org/drafts/thebook.pdf)\n", 19 | "\n", 20 | "## Deep Learning & Neural Networks\n", 21 | "\n", 22 | "- [An Introduction to Neural Networks](http://www.cs.stir.ac.uk/~lss/NNIntro/InvSlides.html)\n", 23 | "- [An Introduction to Image Recognition with Deep Learning](https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721)\n", 24 | "- [Neural Networks and Deep Learning](http://neuralnetworksanddeeplearning.com/index.html)\n", 25 | "\n" 26 | ] 27 | } 28 | ], 29 | "metadata": { 30 | "kernelspec": { 31 | "display_name": "IPython (Python 2.7)", 32 | "language": "python", 33 | "name": "python2" 34 | }, 35 | "language_info": { 36 | "codemirror_mode": { 37 | "name": "ipython", 38 | "version": 2 39 | }, 40 | "file_extension": ".py", 41 | "mimetype": "text/x-python", 42 | "name": "python", 43 | "nbconvert_exporter": "python", 44 | "pygments_lexer": "ipython2", 45 | "version": "2.7.11" 46 | } 47 | }, 48 | "nbformat": 4, 49 | "nbformat_minor": 0 50 | } 51 | -------------------------------------------------------------------------------- /tensorflow_v2/notebooks/0_Prerequisite/mnist_dataset_intro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "\n", 8 | "# MNIST Dataset Introduction\n", 9 | "\n", 10 | "Most examples are using MNIST dataset of handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. 
For simplicity, each image has been flatten and converted to a 1-D numpy array of 784 features (28*28).\n", 11 | "\n", 12 | "## Overview\n", 13 | "\n", 14 | "![MNIST Digits](http://neuralnetworksanddeeplearning.com/images/mnist_100_digits.png)\n", 15 | "\n", 16 | "## Usage\n", 17 | "In our examples, we are using TensorFlow [input_data.py](https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/mnist/input_data.py) script to load that dataset.\n", 18 | "It is quite useful for managing our data, and handle:\n", 19 | "\n", 20 | "- Dataset downloading\n", 21 | "\n", 22 | "- Loading the entire dataset into numpy array: \n", 23 | "\n", 24 | "\n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "# Import MNIST\n", 34 | "from tensorflow.examples.tutorials.mnist import input_data\n", 35 | "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n", 36 | "\n", 37 | "# Load data\n", 38 | "X_train = mnist.train.images\n", 39 | "Y_train = mnist.train.labels\n", 40 | "X_test = mnist.test.images\n", 41 | "Y_test = mnist.test.labels" 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | "metadata": {}, 47 | "source": [ 48 | "- A `next_batch` function that can iterate over the whole dataset and return only the desired fraction of the dataset samples (in order to save memory and avoid to load the entire dataset)." 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": null, 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "# Get the next 64 images array and labels\n", 58 | "batch_X, batch_Y = mnist.train.next_batch(64)" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "metadata": {}, 64 | "source": [ 65 | "Link: http://yann.lecun.com/exdb/mnist/" 66 | ] 67 | } 68 | ], 69 | "metadata": { 70 | "kernelspec": { 71 | "display_name": "Python 2", 72 | "language": "python", 73 | "name": "python2" 74 | }, 75 | "language_info": { 76 | "codemirror_mode": { 77 | "name": "ipython", 78 | "version": 2 79 | }, 80 | "file_extension": ".py", 81 | "mimetype": "text/x-python", 82 | "name": "python", 83 | "nbconvert_exporter": "python", 84 | "pygments_lexer": "ipython2", 85 | "version": "2.7.18" 86 | } 87 | }, 88 | "nbformat": 4, 89 | "nbformat_minor": 1 90 | } 91 | -------------------------------------------------------------------------------- /tensorflow_v2/notebooks/1_Introduction/basic_operations.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Basic Tensor Operations\n", 8 | "\n", 9 | "Basic tensor operations using TensorFlow v2.\n", 10 | "\n", 11 | "- Author: Aymeric Damien\n", 12 | "- Project: https://github.com/aymericdamien/TensorFlow-Examples/" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 1, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "from __future__ import print_function\n", 22 | "import tensorflow as tf" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 2, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "# Define tensor constants.\n", 32 | "a = tf.constant(2)\n", 33 | "b = tf.constant(3)\n", 34 | "c = tf.constant(5)" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": 3, 40 | "metadata": {}, 41 | "outputs": [ 42 | { 43 | "name": "stdout", 44 | "output_type": "stream", 45 | "text": [ 46 | "add = 5\n", 47 | "sub = 
-1\n", 48 | "mul = 6\n", 49 | "div = 0.6666666666666666\n" 50 | ] 51 | } 52 | ], 53 | "source": [ 54 | "# Various tensor operations.\n", 55 | "# Note: Tensors also support python operators (+, *, ...)\n", 56 | "add = tf.add(a, b)\n", 57 | "sub = tf.subtract(a, b)\n", 58 | "mul = tf.multiply(a, b)\n", 59 | "div = tf.divide(a, b)\n", 60 | "\n", 61 | "# Access tensors value.\n", 62 | "print(\"add =\", add.numpy())\n", 63 | "print(\"sub =\", sub.numpy())\n", 64 | "print(\"mul =\", mul.numpy())\n", 65 | "print(\"div =\", div.numpy())" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 4, 71 | "metadata": {}, 72 | "outputs": [ 73 | { 74 | "name": "stdout", 75 | "output_type": "stream", 76 | "text": [ 77 | "mean = 3\n", 78 | "sum = 10\n" 79 | ] 80 | } 81 | ], 82 | "source": [ 83 | "# Some more operations.\n", 84 | "mean = tf.reduce_mean([a, b, c])\n", 85 | "sum = tf.reduce_sum([a, b, c])\n", 86 | "\n", 87 | "# Access tensors value.\n", 88 | "print(\"mean =\", mean.numpy())\n", 89 | "print(\"sum =\", sum.numpy())" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 5, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "# Matrix multiplications.\n", 99 | "matrix1 = tf.constant([[1., 2.], [3., 4.]])\n", 100 | "matrix2 = tf.constant([[5., 6.], [7., 8.]])\n", 101 | "\n", 102 | "product = tf.matmul(matrix1, matrix2)" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": 6, 108 | "metadata": {}, 109 | "outputs": [ 110 | { 111 | "data": { 112 | "text/plain": [ 113 | "" 116 | ] 117 | }, 118 | "execution_count": 6, 119 | "metadata": {}, 120 | "output_type": "execute_result" 121 | } 122 | ], 123 | "source": [ 124 | "# Display Tensor.\n", 125 | "product" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 7, 131 | "metadata": {}, 132 | "outputs": [ 133 | { 134 | "data": { 135 | "text/plain": [ 136 | "array([[19., 22.],\n", 137 | " [43., 50.]], dtype=float32)" 138 | ] 139 | }, 140 | "execution_count": 7, 141 | "metadata": {}, 142 | "output_type": "execute_result" 143 | } 144 | ], 145 | "source": [ 146 | "# Convert Tensor to Numpy.\n", 147 | "product.numpy()" 148 | ] 149 | } 150 | ], 151 | "metadata": { 152 | "kernelspec": { 153 | "display_name": "Python 2", 154 | "language": "python", 155 | "name": "python2" 156 | }, 157 | "language_info": { 158 | "codemirror_mode": { 159 | "name": "ipython", 160 | "version": 2 161 | }, 162 | "file_extension": ".py", 163 | "mimetype": "text/x-python", 164 | "name": "python", 165 | "nbconvert_exporter": "python", 166 | "pygments_lexer": "ipython2", 167 | "version": "2.7.15" 168 | } 169 | }, 170 | "nbformat": 4, 171 | "nbformat_minor": 2 172 | } 173 | -------------------------------------------------------------------------------- /tensorflow_v2/notebooks/1_Introduction/helloworld.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Hello World\n", 8 | "\n", 9 | "A very simple \"hello world\" using TensorFlow v2 tensors.\n", 10 | "\n", 11 | "- Author: Aymeric Damien\n", 12 | "- Project: https://github.com/aymericdamien/TensorFlow-Examples/" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 1, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "import tensorflow as tf" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": 2, 27 | "metadata": {}, 28 | "outputs": [ 29 | { 30 | "name": "stdout", 31 | 
"output_type": "stream", 32 | "text": [ 33 | "tf.Tensor(hello world, shape=(), dtype=string)\n" 34 | ] 35 | } 36 | ], 37 | "source": [ 38 | "# Create a Tensor.\n", 39 | "hello = tf.constant(\"hello world\")\n", 40 | "print(hello)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 3, 46 | "metadata": {}, 47 | "outputs": [ 48 | { 49 | "name": "stdout", 50 | "output_type": "stream", 51 | "text": [ 52 | "hello world\n" 53 | ] 54 | } 55 | ], 56 | "source": [ 57 | "# To access a Tensor value, call numpy().\n", 58 | "print(hello.numpy())" 59 | ] 60 | } 61 | ], 62 | "metadata": { 63 | "kernelspec": { 64 | "display_name": "Python 2", 65 | "language": "python", 66 | "name": "python2" 67 | }, 68 | "language_info": { 69 | "codemirror_mode": { 70 | "name": "ipython", 71 | "version": 2 72 | }, 73 | "file_extension": ".py", 74 | "mimetype": "text/x-python", 75 | "name": "python", 76 | "nbconvert_exporter": "python", 77 | "pygments_lexer": "ipython2", 78 | "version": "2.7.15" 79 | } 80 | }, 81 | "nbformat": 4, 82 | "nbformat_minor": 2 83 | } 84 | --------------------------------------------------------------------------------