├── mult.py
├── redneuronalsimple.py
├── README.md
├── regression.py
├── kmeans.py
├── CNN.py
├── multiGPU.py
└── input_data.py

--------------------------------------------------------------------------------
/mult.py:
--------------------------------------------------------------------------------
import tensorflow as tf

a = tf.placeholder("float")
b = tf.placeholder("float")

y = tf.mul(a, b)

sess = tf.Session()

print sess.run(y, feed_dict={a: 3, b: 3})

--------------------------------------------------------------------------------
/redneuronalsimple.py:
--------------------------------------------------------------------------------
import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

import tensorflow as tf

# Softmax regression: y = softmax(x.W + b)
x = tf.placeholder("float", [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
matm = tf.matmul(x, W)
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder("float", [None, 10])

cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# Train with mini-batches of 100 examples
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# Evaluate on the test set
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

WARNING FOR NEW READERS: This book was written a year ago, during the 2015 Christmas break. TensorFlow captivated me and I wanted to share that knowledge with my students. Keep in mind as you read that it is based on an old TensorFlow release (tensorflow-0.5.0) and is suitable only as an introduction. The current release (tensorflow-0.12.0) has many new and important features. No one expected this book to become such a helpful resource for so many students and practitioners; knowing that it has is an honor for me, thank you! But in my opinion it is now time for this book to pass the baton to more current material (based on tensorflow-0.12.0).

# Code from the book "Hello World en TensorFlow"
### An open version of the book is available at http://jorditorres.org/tensorflow/

[![solarized dualmode](http://www.jorditorres.org/wp-content/uploads/2016/01/Portada.Libro_-668x1024.png)](#features)
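
If you want to try these scripts on a more recent TensorFlow release, note that a few of the functions used here were later renamed: `tf.mul` became `tf.multiply`, `tf.sub` became `tf.subtract`, and `tf.initialize_all_variables()` was replaced by `tf.global_variables_initializer()`; the argument order of `tf.concat` also changed in 1.0. As a minimal sketch (assuming TensorFlow 1.x rather than the 0.5.0 release these scripts target), `mult.py` would look like this:

```python
import tensorflow as tf

a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
y = tf.multiply(a, b)  # tf.mul in the 0.x script

with tf.Session() as sess:
    print(sess.run(y, feed_dict={a: 3.0, b: 3.0}))
```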
--------------------------------------------------------------------------------
/regression.py:
--------------------------------------------------------------------------------
import numpy as np

num_puntos = 100
conjunto_puntos = []
for i in xrange(num_puntos):
    x1 = np.random.normal(0.0, 0.9)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.05)
    conjunto_puntos.append([x1, y1])

x_data = [v[0] for v in conjunto_puntos]
y_data = [v[1] for v in conjunto_puntos]

import matplotlib.pyplot as plt

# Graphic display
plt.plot(x_data, y_data, 'ro')
plt.legend()
plt.show()

import tensorflow as tf

W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b

loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in xrange(101):
    sess.run(train)
    if step % 10 == 0:
        print(step, sess.run(W), sess.run(b))

plt.plot(x_data, y_data, 'ro')
plt.plot(x_data, sess.run(W) * x_data + sess.run(b))
plt.legend()
plt.show()

--------------------------------------------------------------------------------
/kmeans.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf

num_vectors = 1000
num_clusters = 3
num_steps = 100
vector_values = []
for i in xrange(num_vectors):
    if np.random.random() > 0.5:
        vector_values.append([np.random.normal(0.5, 0.6),
                              np.random.normal(0.3, 0.9)])
    else:
        vector_values.append([np.random.normal(2.5, 0.4),
                              np.random.normal(0.8, 0.5)])

df = pd.DataFrame({"x": [v[0] for v in vector_values],
                   "y": [v[1] for v in vector_values]})
sns.lmplot("x", "y", data=df, fit_reg=False, size=7)
plt.show()

vectors = tf.constant(vector_values)
centroids = tf.Variable(tf.slice(tf.random_shuffle(vectors),
                                 [0, 0], [num_clusters, -1]))
expanded_vectors = tf.expand_dims(vectors, 0)
expanded_centroids = tf.expand_dims(centroids, 1)

print expanded_vectors.get_shape()
print expanded_centroids.get_shape()

distances = tf.reduce_sum(
    tf.square(tf.sub(expanded_vectors, expanded_centroids)), 2)
assignments = tf.argmin(distances, 0)

means = tf.concat(0, [
    tf.reduce_mean(
        tf.gather(vectors,
                  tf.reshape(
                      tf.where(
                          tf.equal(assignments, c)
                      ), [1, -1])
                  ), reduction_indices=[1])
    for c in xrange(num_clusters)])

update_centroids = tf.assign(centroids, means)
init_op = tf.initialize_all_variables()

#with tf.Session('local') as sess:
sess = tf.Session()
sess.run(init_op)

for step in xrange(num_steps):
    _, centroid_values, assignment_values = sess.run([update_centroids,
                                                      centroids,
                                                      assignments])
print "centroids"
print centroid_values

data = {"x": [], "y": [], "cluster": []} 61 | for i in xrange(len(assignment_values)): 62 | data["x"].append(vector_values[i][0]) 63 | data["y"].append(vector_values[i][1]) 64 | data["cluster"].append(assignment_values[i]) 65 | df = pd.DataFrame(data) 66 | sns.lmplot("x", "y", data=df, 67 | fit_reg=False, size=7, 68 | hue="cluster", legend=False) 69 | plt.show() 70 | -------------------------------------------------------------------------------- /CNN.py: -------------------------------------------------------------------------------- 1 | import input_data 2 | mnist = input_data.read_data_sets('MNIST_data', one_hot=True) 3 | import tensorflow as tf 4 | 5 | 6 | x = tf.placeholder("float", shape=[None, 784]) 7 | y_ = tf.placeholder("float", shape=[None, 10]) 8 | 9 | x_image = tf.reshape(x, [-1,28,28,1]) 10 | print "x_image=" 11 | print x_image 12 | 13 | def weight_variable(shape): 14 | initial = tf.truncated_normal(shape, stddev=0.1) 15 | return tf.Variable(initial) 16 | 17 | def bias_variable(shape): 18 | initial = tf.constant(0.1, shape=shape) 19 | return tf.Variable(initial) 20 | 21 | def conv2d(x, W): 22 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 23 | 24 | def max_pool_2x2(x): 25 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], 26 | strides=[1, 2, 2, 1], padding='SAME') 27 | 28 | W_conv1 = weight_variable([5, 5, 1, 32]) 29 | b_conv1 = bias_variable([32]) 30 | 31 | 32 | 33 | h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) 34 | h_pool1 = max_pool_2x2(h_conv1) 35 | 36 | 37 | W_conv2 = weight_variable([5, 5, 32, 64]) 38 | b_conv2 = bias_variable([64]) 39 | 40 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 41 | h_pool2 = max_pool_2x2(h_conv2) 42 | 43 | 44 | W_fc1 = weight_variable([7 * 7 * 64, 1024]) 45 | b_fc1 = bias_variable([1024]) 46 | 47 | h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) 48 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 49 | 50 | 51 | keep_prob = tf.placeholder("float") 52 | h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) 53 | 54 | 55 | W_fc2 = weight_variable([1024, 10]) 56 | b_fc2 = bias_variable([10]) 57 | 58 | y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) 59 | 60 | 61 | cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv)) 62 | train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) 63 | correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) 64 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 65 | 66 | sess = tf.Session() 67 | 68 | sess.run(tf.initialize_all_variables()) 69 | for i in range(200): 70 | batch = mnist.train.next_batch(50) 71 | if i%10 == 0: 72 | train_accuracy = sess.run( accuracy, feed_dict={ 73 | x:batch[0], y_: batch[1], keep_prob: 1.0}) 74 | print("step %d, training accuracy %g"%(i, train_accuracy)) 75 | 76 | sess.run(train_step,feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) 77 | 78 | print("test accuracy %g"% sess.run(accuracy, feed_dict={ 79 | x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) 80 | -------------------------------------------------------------------------------- /multiGPU.py: -------------------------------------------------------------------------------- 1 | #Multi GPU Basic example 2 | # code source: Github (2016) Aymeric Damien: https://github.com/aymericdamien/TensorFlow-Examples 3 | ''' 4 | This tutorial requires your machine to have 2 GPUs 5 | "/cpu:0": The CPU of your machine. 
6 | "/gpu:0": The first GPU of your machine 7 | "/gpu:1": The second GPU of your machine 8 | ''' 9 | 10 | import numpy as np 11 | import tensorflow as tf 12 | import datetime 13 | 14 | #Processing Units logs 15 | log_device_placement = True 16 | 17 | #num of multiplications to perform 18 | n = 10 19 | 20 | ''' 21 | Example: compute A^n + B^n on 2 GPUs 22 | Results on 8 cores with 2 GTX-980: 23 | * Single GPU computation time: 0:00:11.277449 24 | * Multi GPU computation time: 0:00:07.131701 25 | ''' 26 | #Create random large matrix 27 | A = np.random.rand(1e4, 1e4).astype('float32') 28 | B = np.random.rand(1e4, 1e4).astype('float32') 29 | 30 | # Creates a graph to store results 31 | c1 = [] 32 | c2 = [] 33 | 34 | def matpow(M, n): 35 | if n < 1: #Abstract cases where n < 1 36 | return M 37 | else: 38 | return tf.matmul(M, matpow(M, n-1)) 39 | 40 | ''' 41 | Single GPU computing 42 | ''' 43 | with tf.device('/gpu:0'): 44 | a = tf.constant(A) 45 | b = tf.constant(B) 46 | #compute A^n and B^n and store results in c1 47 | c1.append(matpow(a, n)) 48 | c1.append(matpow(b, n)) 49 | 50 | with tf.device('/cpu:0'): 51 | sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n 52 | 53 | t1_1 = datetime.datetime.now() 54 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess: 55 | # Runs the op. 56 | sess.run(sum) 57 | t2_1 = datetime.datetime.now() 58 | 59 | 60 | ''' 61 | Multi GPU computing 62 | ''' 63 | #GPU:0 computes A^n 64 | with tf.device('/gpu:0'): 65 | #compute A^n and store result in c2 66 | a = tf.constant(A) 67 | c2.append(matpow(a, n)) 68 | 69 | #GPU:1 computes B^n 70 | with tf.device('/gpu:1'): 71 | #compute B^n and store result in c2 72 | b = tf.constant(B) 73 | c2.append(matpow(b, n)) 74 | 75 | with tf.device('/cpu:0'): 76 | sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n 77 | 78 | t1_2 = datetime.datetime.now() 79 | with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess: 80 | # Runs the op. 81 | sess.run(sum) 82 | t2_2 = datetime.datetime.now() 83 | 84 | 85 | print "Single GPU computation time: " + str(t2_1-t1_1) 86 | print "Multi GPU computation time: " + str(t2_2-t1_2) 87 | 88 | -------------------------------------------------------------------------------- /input_data.py: -------------------------------------------------------------------------------- 1 | # --- 2 | # Fichero descargado de https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/g3doc/tutorials/mnist/input_data.py 3 | # --- 4 | # Copyright 2015 Google Inc. All Rights Reserved. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os

import numpy
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin

SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'


def maybe_download(filename, work_directory):
  """Download the data from Yann's website, unless it's already here."""
  if not os.path.exists(work_directory):
    os.mkdir(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not os.path.exists(filepath):
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  return filepath


def _read32(bytestream):
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  return numpy.frombuffer(bytestream.read(4), dtype=dt)


def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError(
          'Invalid magic number %d in MNIST image file: %s' %
          (magic, filename))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data


def dense_to_one_hot(labels_dense, num_classes=10):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot


def extract_labels(filename, one_hot=False):
  """Extract the labels into a 1D uint8 numpy array [index]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST label file: %s' %
          (magic, filename))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels)
    return labels


class DataSet(object):

  def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0

  @property
  def images(self):
    return self._images

  @property
  def labels(self):
    return self._labels

  @property
  def num_examples(self):
    return self._num_examples

  @property
  def epochs_completed(self):
    return self._epochs_completed

  def next_batch(self, batch_size, fake_data=False):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      fake_image = [1.0 for _ in xrange(784)]
      fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)]
    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end]


def read_data_sets(train_dir, fake_data=False, one_hot=False):
  class DataSets(object):
    pass
  data_sets = DataSets()

  if fake_data:
    data_sets.train = DataSet([], [], fake_data=True)
    data_sets.validation = DataSet([], [], fake_data=True)
    data_sets.test = DataSet([], [], fake_data=True)
    return data_sets

  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000

  local_file = maybe_download(TRAIN_IMAGES, train_dir)
  train_images = extract_images(local_file)

  local_file = maybe_download(TRAIN_LABELS, train_dir)
  train_labels = extract_labels(local_file, one_hot=one_hot)

  local_file = maybe_download(TEST_IMAGES, train_dir)
  test_images = extract_images(local_file)

  local_file = maybe_download(TEST_LABELS, train_dir)
  test_labels = extract_labels(local_file, one_hot=one_hot)

  validation_images = train_images[:VALIDATION_SIZE]
  validation_labels = train_labels[:VALIDATION_SIZE]
  train_images = train_images[VALIDATION_SIZE:]
  train_labels = train_labels[VALIDATION_SIZE:]

  data_sets.train = DataSet(train_images, train_labels)
  data_sets.validation = DataSet(validation_images, validation_labels)
  data_sets.test = DataSet(test_images, test_labels)
  return data_sets

--------------------------------------------------------------------------------