├── .gitignore
├── ReadMe.md
├── boston.py
├── hdf5.py
├── iris_classifier.py
├── random_forest.py
├── resnet.py
└── simplest.py

/.gitignore:
--------------------------------------------------------------------------------
.idea
--------------------------------------------------------------------------------
/ReadMe.md:
--------------------------------------------------------------------------------
# Neural Networks Playground project

A simple playground project for the TensorFlow Python library.

All thoughts, experiments, investigations, and research live here.
--------------------------------------------------------------------------------
/boston.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing

import tensorflow as tf


def main(unused_argv):
  # Load dataset.
  boston = datasets.load_boston()
  x, y = boston.data, boston.target

  # Split dataset into train / test.
  x_train, x_test, y_train, y_test = model_selection.train_test_split(
      x, y, test_size=0.2, random_state=42)

  # Scale data (training set) to 0 mean and unit standard deviation.
  scaler = preprocessing.StandardScaler()
  x_train = scaler.fit_transform(x_train)

  # Build a 2-layer fully connected DNN with 10 and 10 units respectively.
  feature_columns = [
      tf.feature_column.numeric_column('x', shape=np.array(x_train).shape[1:])]
  regressor = tf.estimator.DNNRegressor(
      feature_columns=feature_columns, hidden_units=[10, 10])

  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': x_train}, y=y_train, batch_size=1, num_epochs=None, shuffle=True)
  regressor.train(input_fn=train_input_fn, steps=2000)

  # Predict.
  x_transformed = scaler.transform(x_test)
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': x_transformed}, y=y_test, num_epochs=1, shuffle=False)
  predictions = regressor.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['predictions'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)

  # Score with sklearn (y_true first, matching sklearn's signature).
  score_sklearn = metrics.mean_squared_error(y_test, y_predicted)
  print('MSE (sklearn): {0:f}'.format(score_sklearn))

  # Score with tensorflow.
  scores = regressor.evaluate(input_fn=test_input_fn)
  print('MSE (tensorflow): {0:f}'.format(scores['average_loss']))


if __name__ == '__main__':
  tf.app.run()
--------------------------------------------------------------------------------
/hdf5.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py  # pylint: disable=g-bad-import-order


X_FEATURE = 'x'  # Name of the input feature.


def main(unused_argv):
  # Load dataset.
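  # (For reference: the Iris dataset bundled with scikit-learn has 150
  # samples, 4 float features and 3 target classes.)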
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = model_selection.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  # Note that we save and load the Iris data in HDF5 format here purely as a
  # simple demonstration.
  h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
  h5f.create_dataset('X_train', data=x_train)
  h5f.create_dataset('X_test', data=x_test)
  h5f.create_dataset('y_train', data=y_train)
  h5f.create_dataset('y_test', data=y_test)
  h5f.close()

  h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
  x_train = np.array(h5f['X_train'])
  x_test = np.array(h5f['X_test'])
  y_train = np.array(h5f['y_train'])
  y_test = np.array(h5f['y_test'])

  # Build a 3-layer DNN with 10, 20 and 10 units respectively.
  feature_columns = [
      tf.feature_column.numeric_column(
          X_FEATURE, shape=np.array(x_train).shape[1:])]
  classifier = tf.estimator.DNNClassifier(
      feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=200)

  # Predict.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class_ids'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)

  # Score with sklearn.
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))

  # Score with tensorflow.
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
  tf.app.run()
--------------------------------------------------------------------------------
/iris_classifier.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from six.moves.urllib.request import urlretrieve

import tensorflow as tf

# Data sets
IRIS_TRAINING = 'iris_training.csv'
IRIS_TRAINING_URL = 'http://download.tensorflow.org/data/iris_training.csv'

IRIS_TEST = 'iris_test.csv'
IRIS_TEST_URL = 'http://download.tensorflow.org/data/iris_test.csv'

FEATURE_KEYS = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']


def maybe_download_iris_data(file_name, download_url):
  """Downloads the file and returns the number of records in it."""
  if not os.path.exists(file_name):
    urlretrieve(download_url, file_name)

  # The first line is a comma-separated header; its first field is the total
  # number of records in the file.
  with open(file_name, 'r') as f:
    first_line = f.readline()
  num_elements = first_line.split(',')[0]
  return int(num_elements)


def input_fn(file_name, num_data, batch_size, is_training):
  """Creates an input_fn required by Estimator train/evaluate."""
  # The data file is expected to exist locally by this point (see
  # maybe_download_iris_data above).

  def _parse_csv(rows_string_tensor):
    """Takes the string input tensor and returns a tuple of (features, labels)."""
    # The last column is the label.
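    # Passing an empty list as the default for every column tells
    # tf.decode_csv that each field is required and should be parsed as
    # float32; the label column is cast to int32 afterwards.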
    num_features = len(FEATURE_KEYS)
    num_columns = num_features + 1
    columns = tf.decode_csv(rows_string_tensor,
                            record_defaults=[[]] * num_columns)
    features = dict(zip(FEATURE_KEYS, columns[:num_features]))
    labels = tf.cast(columns[num_features], tf.int32)
    return features, labels

  def _input_fn():
    """The input_fn."""
    dataset = tf.data.TextLineDataset([file_name])
    # Skip the first line (which does not have data).
    dataset = dataset.skip(1)
    dataset = dataset.map(_parse_csv)

    if is_training:
      # For this small dataset, which can fit into memory, to achieve true
      # randomness, the shuffle buffer size is set as the total number of
      # elements in the dataset.
      dataset = dataset.shuffle(num_data)
      dataset = dataset.repeat()

    dataset = dataset.batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    features, labels = iterator.get_next()
    return features, labels

  return _input_fn


def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  num_training_data = maybe_download_iris_data(
      IRIS_TRAINING, IRIS_TRAINING_URL)
  num_test_data = maybe_download_iris_data(IRIS_TEST, IRIS_TEST_URL)

  # Build a 3-layer DNN with 10, 20 and 10 units respectively.
  feature_columns = [
      tf.feature_column.numeric_column(key, shape=1) for key in FEATURE_KEYS]
  classifier = tf.estimator.DNNClassifier(
      feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

  # Train.
  train_input_fn = input_fn(IRIS_TRAINING, num_training_data, batch_size=32,
                            is_training=True)
  classifier.train(input_fn=train_input_fn, steps=400)

  # Eval.
  test_input_fn = input_fn(IRIS_TEST, num_test_data, batch_size=32,
                           is_training=False)
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
  tf.app.run()
--------------------------------------------------------------------------------
/random_forest.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys
import tempfile

import numpy

from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.platform import app

FLAGS = None


def build_estimator(model_dir):
  """Build an estimator."""
  params = tensor_forest.ForestHParams(
      num_classes=10,
      num_features=784,
      num_trees=FLAGS.num_trees,
      max_nodes=FLAGS.max_nodes)
  graph_builder_class = tensor_forest.RandomForestGraphs
  if FLAGS.use_training_loss:
    graph_builder_class = tensor_forest.TrainingLossForest
  return random_forest.TensorForestEstimator(
      params, graph_builder_class=graph_builder_class, model_dir=model_dir)


def train_and_eval():
  """Train and evaluate the model."""
  model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir
  print('model directory = %s' % model_dir)

  est = build_estimator(model_dir)

  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)

  train_input_fn = numpy_io.numpy_input_fn(
      x={'images': mnist.train.images},
      y=mnist.train.labels.astype(numpy.int32),
      batch_size=FLAGS.batch_size,
      num_epochs=None,
      shuffle=True)
  est.fit(input_fn=train_input_fn, steps=None)

  metric_name = 'accuracy'
  metric = {
      metric_name:
          metric_spec.MetricSpec(
              eval_metrics.get_metric(metric_name),
              prediction_key=eval_metrics.get_prediction_key(metric_name))
  }

  test_input_fn = numpy_io.numpy_input_fn(
      x={'images': mnist.test.images},
      y=mnist.test.labels.astype(numpy.int32),
      num_epochs=1,
      batch_size=FLAGS.batch_size,
      shuffle=False)

  results = est.evaluate(input_fn=test_input_fn, metrics=metric)
  for key in sorted(results):
    print('%s: %s' % (key, results[key]))


def main(_):
  train_and_eval()


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--model_dir',
      type=str,
      default='',
      help='Base directory for output models.'
  )
  parser.add_argument(
      '--data_dir',
      type=str,
      default='/tmp/data/',
      help='Directory for storing data.'
  )
  parser.add_argument(
      '--train_steps',
      type=int,
      default=1000,
      help='Number of training steps.'
  )
  parser.add_argument(
      '--batch_size',
      type=int,
      default=1000,
      help='Number of examples in a training batch.'
  )
  parser.add_argument(
      '--num_trees',
      type=int,
      default=100,
      help='Number of trees in the forest.'
  )
  parser.add_argument(
      '--max_nodes',
      type=int,
      default=1000,
      help='Max total nodes in a single tree.'
  )
  parser.add_argument(
      '--use_training_loss',
      action='store_true',
      default=False,
      help='If set, use training loss as the termination criterion.'
  )
  FLAGS, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
--------------------------------------------------------------------------------
/resnet.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import namedtuple
from math import sqrt

import numpy as np
import tensorflow as tf


N_DIGITS = 10  # Number of digits.
X_FEATURE = 'x'  # Name of the input feature.


def res_net_model(features, labels, mode):
  """Builds a residual network."""

  # Configurations for each bottleneck group.
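  # Each group stacks `num_blocks` residual blocks. `num_filters` is the
  # channel count flowing into and out of a block, while `bottleneck_size`
  # is the (smaller) channel count of the 3x3 convolution in its middle.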
  BottleneckGroup = namedtuple('BottleneckGroup',
                               ['num_blocks', 'num_filters', 'bottleneck_size'])
  groups = [
      BottleneckGroup(3, 128, 32), BottleneckGroup(3, 256, 64),
      BottleneckGroup(3, 512, 128), BottleneckGroup(3, 1024, 256)
  ]

  x = features[X_FEATURE]
  input_shape = x.get_shape().as_list()

  # Reshape the input into the right shape if it's a 2D tensor.
  if len(input_shape) == 2:
    ndim = int(sqrt(input_shape[1]))
    x = tf.reshape(x, [-1, ndim, ndim, 1])

  training = (mode == tf.estimator.ModeKeys.TRAIN)

  # First convolution expands to 64 channels.
  with tf.variable_scope('conv_layer1'):
    net = tf.layers.conv2d(
        x,
        filters=64,
        kernel_size=7,
        activation=tf.nn.relu)
    net = tf.layers.batch_normalization(net, training=training)

  # Max pool.
  net = tf.layers.max_pooling2d(
      net, pool_size=3, strides=2, padding='same')

  # First chain of resnets.
  with tf.variable_scope('conv_layer2'):
    net = tf.layers.conv2d(
        net,
        filters=groups[0].num_filters,
        kernel_size=1,
        padding='valid')

  # Create the bottleneck groups, each of which contains `num_blocks`
  # bottleneck blocks.
  for group_i, group in enumerate(groups):
    for block_i in range(group.num_blocks):
      name = 'group_%d/block_%d' % (group_i, block_i)

      # 1x1 convolution responsible for reducing dimension.
      with tf.variable_scope(name + '/conv_in'):
        conv = tf.layers.conv2d(
            net,
            filters=group.num_filters,
            kernel_size=1,
            padding='valid',
            activation=tf.nn.relu)
        conv = tf.layers.batch_normalization(conv, training=training)

      with tf.variable_scope(name + '/conv_bottleneck'):
        conv = tf.layers.conv2d(
            conv,
            filters=group.bottleneck_size,
            kernel_size=3,
            padding='same',
            activation=tf.nn.relu)
        conv = tf.layers.batch_normalization(conv, training=training)

      # 1x1 convolution responsible for restoring dimension.
      with tf.variable_scope(name + '/conv_out'):
        input_dim = net.get_shape()[-1].value
        conv = tf.layers.conv2d(
            conv,
            filters=input_dim,
            kernel_size=1,
            padding='valid',
            activation=tf.nn.relu)
        conv = tf.layers.batch_normalization(conv, training=training)

      # Identity shortcut connection: the block learns a residual function
      # that is added back onto its input.
      net = conv + net

    try:
      # Upscale to the next group size.
      next_group = groups[group_i + 1]
      with tf.variable_scope('block_%d/conv_upscale' % group_i):
        net = tf.layers.conv2d(
            net,
            filters=next_group.num_filters,
            kernel_size=1,
            padding='same',
            activation=None,
            bias_initializer=None)
    except IndexError:
      pass

  net_shape = net.get_shape().as_list()
  net = tf.nn.avg_pool(
      net,
      ksize=[1, net_shape[1], net_shape[2], 1],
      strides=[1, 1, 1, 1],
      padding='VALID')

  net_shape = net.get_shape().as_list()
  net = tf.reshape(net, [-1, net_shape[1] * net_shape[2] * net_shape[3]])

  # Compute logits (1 per class) and compute loss.
  logits = tf.layers.dense(net, N_DIGITS, activation=None)

  # Compute predictions.
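  # argmax over the class dimension yields the most likely digit. In PREDICT
  # mode the Estimator only needs class ids and per-class probabilities, so
  # we return early without computing a loss.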
  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        'class': predicted_classes,
        'prob': tf.nn.softmax(logits)
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)

  # Compute loss.
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

  # Create training op. The control dependency on UPDATE_OPS ensures the
  # batch-norm moving statistics are updated on every training step.
  if training:
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.01)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      train_op = optimizer.minimize(
          loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  # Compute evaluation metrics.
  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode, loss=loss, eval_metric_ops=eval_metric_ops)


def main(unused_args):
  # Download and load MNIST data.
  mnist = tf.contrib.learn.datasets.DATASETS['mnist']('/tmp/mnist')

  # Create a new resnet classifier.
  classifier = tf.estimator.Estimator(model_fn=res_net_model)

  tf.logging.set_verbosity(tf.logging.INFO)  # Show training logs.

  # Train model and save summaries into logdir.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: mnist.train.images},
      y=mnist.train.labels.astype(np.int32),
      batch_size=100,
      num_epochs=None,
      shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=100)

  # Calculate accuracy.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: mnist.test.images},
      y=mnist.test.labels.astype(np.int32),
      num_epochs=1,
      shuffle=False)
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy: {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
  tf.app.run()
--------------------------------------------------------------------------------
/simplest.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf


N_DIGITS = 10  # Number of digits.
X_FEATURE = 'x'  # Name of the input feature.


def conv_model(features, labels, mode):
  """2-layer convolution model."""
  # Reshape the feature to a 4-D tensor whose 2nd and 3rd dimensions are the
  # image width and height, and whose final dimension is the number of color
  # channels.
  feature = tf.reshape(features[X_FEATURE], [-1, 28, 28, 1])

  # First conv layer will compute 32 features for each 5x5 patch.
  with tf.variable_scope('conv_layer1'):
    h_conv1 = tf.layers.conv2d(
        feature,
        filters=32,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu)
    h_pool1 = tf.layers.max_pooling2d(
        h_conv1, pool_size=2, strides=2, padding='same')

  # Second conv layer will compute 64 features for each 5x5 patch.
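  # After two 2x2 max-pools with stride 2, a 28x28 input shrinks to 14x14
  # and then 7x7, which is why the flatten below uses 7 * 7 * 64.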
  with tf.variable_scope('conv_layer2'):
    h_conv2 = tf.layers.conv2d(
        h_pool1,
        filters=64,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu)
    h_pool2 = tf.layers.max_pooling2d(
        h_conv2, pool_size=2, strides=2, padding='same')
    # Reshape the tensor into a batch of vectors.
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

  # Densely connected layer with 1024 neurons.
  h_fc1 = tf.layers.dense(h_pool2_flat, 1024, activation=tf.nn.relu)
  h_fc1 = tf.layers.dropout(
      h_fc1,
      rate=0.5,
      training=(mode == tf.estimator.ModeKeys.TRAIN))

  # Compute logits (1 per class) and compute loss.
  logits = tf.layers.dense(h_fc1, N_DIGITS, activation=None)

  # Compute predictions.
  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        'class': predicted_classes,
        'prob': tf.nn.softmax(logits)
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)

  # Compute loss.
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

  # Create training op.
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  # Compute evaluation metrics.
  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode, loss=loss, eval_metric_ops=eval_metric_ops)


def main(unused_args):
  tf.logging.set_verbosity(tf.logging.INFO)

  ### Download and load MNIST dataset.
  mnist = tf.contrib.learn.datasets.DATASETS['mnist']('/tmp/mnist')
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: mnist.train.images},
      y=mnist.train.labels.astype(np.int32),
      batch_size=100,
      num_epochs=None,
      shuffle=True)
  # Evaluate on the held-out test split, not on the training images.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: mnist.test.images},
      y=mnist.test.labels.astype(np.int32),
      num_epochs=1,
      shuffle=False)

  ### Linear classifier.
  feature_columns = [
      tf.feature_column.numeric_column(
          X_FEATURE, shape=mnist.train.images.shape[1:])]

  classifier = tf.estimator.LinearClassifier(
      feature_columns=feature_columns, n_classes=N_DIGITS)
  classifier.train(input_fn=train_input_fn, steps=200)
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (LinearClassifier): {0:f}'.format(scores['accuracy']))

  ### Convolutional network.
  classifier = tf.estimator.Estimator(model_fn=conv_model)
  classifier.train(input_fn=train_input_fn, steps=200)
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (conv_model): {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
  tf.app.run()
--------------------------------------------------------------------------------