├── README.md
└── numeros.py

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# youtube-tensorflow-mnist
Code for a neural network in Python + TensorFlow using the MNIST dataset

--------------------------------------------------------------------------------
/numeros.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals

import logging
import math

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds

logger = tf.get_logger()
logger.setLevel(logging.ERROR)

# Load MNIST: 60,000 training and 10,000 test images of handwritten digits.
dataset, metadata = tfds.load('mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']

class_names = [
    'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
    'Seven', 'Eight', 'Nine'
]

num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples

# Normalize: rescale pixel values from the 0-255 range to the 0-1 range.
def normalize(images, labels):
    images = tf.cast(images, tf.float32)
    images /= 255
    return images, labels

train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)

# Network structure: flatten each 28x28x1 image into a 784-element vector,
# pass it through two hidden layers, and output one probability per digit.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
    tf.keras.layers.Dense(64, activation=tf.nn.relu),
    tf.keras.layers.Dense(64, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)  # for classification
])

# Specify the optimizer, loss, and metrics to use.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

# Train in batches of 32 examples each.
BATCHSIZE = 32
train_dataset = train_dataset.repeat().shuffle(num_train_examples).batch(BATCHSIZE)
test_dataset = test_dataset.batch(BATCHSIZE)

# Run the training.
model.fit(
    train_dataset, epochs=5,
    # Needed because .repeat() makes the dataset infinite; soon this will no longer be required.
    steps_per_epoch=math.ceil(num_train_examples / BATCHSIZE)
)

# Evaluate the trained model against the test dataset.
test_loss, test_accuracy = model.evaluate(
    test_dataset, steps=math.ceil(num_test_examples / BATCHSIZE)
)

print("Accuracy on the test set:", test_accuracy)

# Take a single batch of test images and run predictions on it.
for test_images, test_labels in test_dataset.take(1):
    test_images = test_images.numpy()
    test_labels = test_labels.numpy()
    predictions = model.predict(test_images)

def plot_image(i, predictions_array, true_labels, images):
    predictions_array, true_label, img = predictions_array[i], true_labels[i], images[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])

    plt.imshow(img[..., 0], cmap=plt.cm.binary)

    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'  # correct prediction
    else:
        color = 'red'   # wrong prediction

    plt.xlabel("Prediction: {}".format(class_names[predicted_label]), color=color)

def plot_value_array(i, predictions_array, true_label):
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#888888")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)

    # Highlight the predicted class in red and the true class in blue.
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')

numrows = 5
numcols = 3
numimages = numrows * numcols

# Plot each test image next to the bar chart of its predicted probabilities.
plt.figure(figsize=(2 * 2 * numcols, 2 * numrows))
for i in range(numimages):
    plt.subplot(numrows, 2 * numcols, 2 * i + 1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(numrows, 2 * numcols, 2 * i + 2)
    plot_value_array(i, predictions, test_labels)

plt.show()
--------------------------------------------------------------------------------
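
The script trains and plots but never persists the model, so the weights are lost when it exits. Not part of the repo, but a minimal sketch of how the trained network could be saved and reused afterwards: it assumes the `model`, `class_names`, and `test_images` variables from `numeros.py` above, and the `mnist_model.h5` filename is a hypothetical example.

```python
# Hypothetical follow-up to numeros.py: persist and reuse the trained model.
# Assumes `model`, `class_names`, and `test_images` exist as defined above.
import numpy as np
import tensorflow as tf

model.save('mnist_model.h5')  # example filename, not from the repo
restored = tf.keras.models.load_model('mnist_model.h5')

single = test_images[0:1]            # keep the batch dimension: shape (1, 28, 28, 1)
probs = restored.predict(single)[0]  # ten softmax probabilities, one per digit
print("Predicted digit:", class_names[int(np.argmax(probs))])
```

Reloading with `tf.keras.models.load_model` restores the architecture, weights, and compile settings in one step, so the restored model can be evaluated or used for prediction without rebuilding the `Sequential` stack.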