├── .gitignore ├── 01_Introduction ├── 01_How_TensorFlow_Works │ └── 01_How_TensorFlow_Works.ipynb ├── 02_Creating_and_Using_Tensors │ ├── 02_tensors.ipynb │ └── 02_tensors.py ├── 03_Using_Variables_and_Placeholders │ ├── 03_placeholders.ipynb │ └── 03_placeholders.py ├── 04_Working_with_Matrices │ ├── 04_matrices.ipynb │ └── 04_matrices.py ├── 05_Declaring_Operations │ ├── 05_operations.ipynb │ └── 05_operations.py ├── 06_Implementing_Activation_Functions │ ├── 06_activation_functions.ipynb │ └── 06_activation_functions.py ├── 07_Working_with_Data_Sources │ ├── 07_data_gathering.ipynb │ ├── 07_data_gathering.py │ └── birthweight_data │ │ └── birthweight.dat └── images │ ├── 01_outline.png │ ├── 02_variable.png │ ├── 03_placeholder.png │ ├── 06_activation_funs1.png │ └── 06_activation_funs2.png ├── 02_TensorFlow_Way ├── 01_Operations_as_a_Computational_Graph │ ├── 01_operations_on_a_graph.ipynb │ └── 01_operations_on_a_graph.py ├── 02_Layering_Nested_Operations │ ├── 02_layering_nested_operations.ipynb │ └── 02_layering_nested_operations.py ├── 03_Working_with_Multiple_Layers │ ├── 03_multiple_layers.ipynb │ └── 03_multiple_layers.py ├── 04_Implementing_Loss_Functions │ ├── 04_loss_functions.ipynb │ └── 04_loss_functions.py ├── 05_Implementing_Back_Propagation │ ├── 05_back_propagation.ipynb │ └── 05_back_propagation.py ├── 06_Working_with_Batch_and_Stochastic_Training │ ├── 06_batch_stochastic_training.ipynb │ └── 06_batch_stochastic_training.py ├── 07_Combining_Everything_Together │ ├── 07_combining_everything_together.ipynb │ └── 07_combining_everything_together.py ├── 08_Evaluating_Models │ ├── 08_evaluating_models.ipynb │ └── 08_evaluating_models.py └── images │ ├── 01_Operations_on_a_Graph.png │ ├── 02_Multiple_Operations.png │ ├── 03_Multiple_Layers.png │ ├── 04_loss_fun1.png │ ├── 04_loss_fun2.png │ ├── 06_Back_Propagation.png │ ├── 07_Combing_Everything_Together.png │ └── 08_Evaluating_Models.png ├── 03_Linear_Regression ├── 01_Using_the_Matrix_Inverse_Method │ ├── 01_lin_reg_inverse.ipynb │ └── 01_lin_reg_inverse.py ├── 02_Implementing_a_Decomposition_Method │ ├── 02_lin_reg_decomposition.ipynb │ └── 02_lin_reg_decomposition.py ├── 03_TensorFlow_Way_of_Linear_Regression │ ├── 03_lin_reg_tensorflow_way.ipynb │ └── 03_lin_reg_tensorflow_way.py ├── 04_Loss_Functions_in_Linear_Regressions │ ├── 04_lin_reg_l1_vs_l2.ipynb │ └── 04_lin_reg_l1_vs_l2.py ├── 05_Implementing_Deming_Regression │ ├── 05_deming_regression.py │ └── 05_demming_regression.ipynb ├── 06_Implementing_Lasso_and_Ridge_Regression │ ├── 06_lasso_and_ridge_regression.ipynb │ └── 06_lasso_and_ridge_regression.py ├── 07_Implementing_Elasticnet_Regression │ ├── 07_elasticnet_regression.ipynb │ └── 07_elasticnet_regression.py ├── 08_Implementing_Logistic_Regression │ ├── 08_logistic_regression.ipynb │ └── 08_logistic_regression.py └── images │ ├── 01_Inverse_Matrix_Method.png │ ├── 02_Cholesky_Decomposition.png │ ├── 03_lin_reg_fit.png │ ├── 03_lin_reg_loss.png │ ├── 04_L1_L2_learningrates.png │ ├── 04_L1_L2_loss.png │ ├── 04_L1_L2_loss2.png │ ├── 05_demming_reg.png │ ├── 05_demming_vs_linear_reg.png │ ├── 07_elasticnet_reg_loss.png │ ├── 08_logistic_reg_acc.png │ └── 08_logistic_reg_loss.png ├── 04_Support_Vector_Machines ├── 02_Working_with_Linear_SVMs │ ├── 02_linear_svm.ipynb │ └── 02_linear_svm.py ├── 03_Reduction_to_Linear_Regression │ ├── 03_support_vector_regression.ipynb │ └── 03_support_vector_regression.py ├── 04_Working_with_Kernels │ ├── 04_svm_kernels.ipynb │ └── 04_svm_kernels.py ├── 
05_Implementing_Nonlinear_SVMs │ ├── 05_nonlinear_svm.ipynb │ └── 05_nonlinear_svm.py ├── 06_Implementing_Multiclass_SVMs │ ├── 06_multiclass_svm.ipynb │ └── 06_multiclass_svm.py └── images │ ├── 01_introduction.png │ ├── 02_linear_svm_accuracy.png │ ├── 02_linear_svm_loss.png │ ├── 02_linear_svm_output.png │ ├── 03_linear_svm_loss.png │ ├── 03_svm_regression_output.png │ ├── 04_linear_svm_gaussian.png │ ├── 04_nonlinear_data_linear_kernel.png │ ├── 05_non_linear_svms.png │ └── 06_multiclass_svm.png ├── 05_Nearest_Neighbor_Methods ├── 02_Working_with_Nearest_Neighbors │ ├── 02_nearest_neighbor.ipynb │ └── 02_nearest_neighbor.py ├── 03_Working_with_Text_Distances │ ├── 03_text_distances.ipynb │ └── 03_text_distances.py ├── 04_Computing_with_Mixed_Distance_Functions │ ├── 04_mixed_distance_functions_knn.ipynb │ └── 04_mixed_distance_functions_knn.py ├── 05_An_Address_Matching_Example │ ├── 05_address_matching.ipynb │ └── 05_address_matching.py ├── 06_Nearest_Neighbors_for_Image_Recognition │ ├── 06_image_recognition.ipynb │ └── 06_image_recognition.py └── images │ ├── 02_mse_vs_variance.png │ ├── 02_nn_histogram.png │ ├── 04_pred_vs_actual.png │ ├── 06_nn_image_recognition.png │ ├── image.png │ └── nearest_neighbor_intro.jpg ├── 06_Neural_Networks ├── 01_Introduction │ └── Introduction_to_Neural_Networks.ipynb ├── 02_Implementing_an_Operational_Gate │ ├── 02_gates.ipynb │ └── 02_gates.py ├── 03_Working_with_Activation_Functions │ ├── 03_activation_functions.ipynb │ └── 03_activation_functions.py ├── 04_Single_Hidden_Layer_Network │ ├── 04_single_hidden_layer_network.ipynb │ └── 04_single_hidden_layer_network.py ├── 05_Implementing_Different_Layers │ ├── 05_implementing_different_layers.ipynb │ └── 05_implementing_different_layers.py ├── 06_Using_Multiple_Layers │ ├── 06_using_a_multiple_layer_network.ipynb │ └── 06_using_a_multiple_layer_network.py ├── 07_Improving_Linear_Regression │ ├── 07_improving_linear_regression.ipynb │ └── 07_improving_linear_regression.py ├── 08_Learning_Tic_Tac_Toe │ ├── base_tic_tac_toe_moves.csv │ └── tic_tac_toe_moves.py └── images │ ├── 02_operational_gates.png │ ├── 03_activation1.png │ ├── 03_activation2.png │ ├── 04_nn_layout.png │ ├── 04_nn_loss.png │ ├── 06_nn_multiple_layers_loss.png │ ├── 07_lin_reg_acc.png │ ├── 07_lin_reg_loss.png │ ├── 08_tic_tac_toe_architecture.png │ ├── 08_tictactoe_layout.png │ ├── 08_tictactoe_loss.png │ └── image.png ├── 07_Natural_Language_Processing ├── 01_Introduction │ └── 01_introduction.ipynb ├── 02_Working_with_Bag_of_Words │ ├── 02_bag_of_words.ipynb │ └── 02_bag_of_words.py ├── 03_Implementing_tf_idf │ ├── 03_implementing_tf_idf.ipynb │ └── 03_implementing_tf_idf.py ├── 04_Working_With_Skip_Gram_Embeddings │ ├── 04_working_with_skipgram.ipynb │ └── 04_working_with_skipgram.py ├── 05_Working_With_CBOW_Embeddings │ ├── 05_Working_With_CBOW.ipynb │ ├── 05_Working_With_CBOW.py │ └── text_helpers.py ├── 06_Using_Word2Vec_Embeddings │ ├── 06_using_word2vec.ipynb │ ├── 06_using_word2vec.py │ └── text_helpers.py ├── 07_Sentiment_Analysis_With_Doc2Vec │ ├── 07_sentiment_with_doc2vec.ipynb │ ├── 07_sentiment_with_doc2vec.py │ └── text_helpers.py └── images │ ├── 02_bag_of_words.png │ ├── 03_tfidf_acc.png │ ├── 03_tfidf_loss.png │ ├── 04_skipgram_model.png │ ├── 05_cbow_model.png │ ├── 06_word2vec_acc.png │ ├── 06_word2vec_loss.png │ ├── 07_sentiment_doc2vec_loss.png │ └── image.png ├── 08_Convolutional_Neural_Networks ├── 02_Intro_to_CNN_MNIST │ ├── 02_introductory_cnn.ipynb │ └── 02_introductory_cnn.py ├── 03_CNN_CIFAR10 │ ├── 
03_cnn_cifar10.ipynb │ └── 03_cnn_cifar10.py ├── 04_Retraining_Current_Architectures │ ├── 04_download_cifar10.ipynb │ └── 04_download_cifar10.py ├── 05_Stylenet_NeuralStyle │ ├── 05_stylenet.ipynb │ └── 05_stylenet.py ├── 06_Deepdream │ ├── 06_deepdream.ipynb │ ├── 06_deepdream.py │ └── book_cover.jpg └── images │ ├── 01_intro_cnn.png │ ├── 01_intro_cnn2.png │ ├── 02_cnn1_loss_acc.png │ ├── 02_cnn1_mnist_output.png │ ├── 03_cnn2_loss_acc.png │ ├── 05_stylenet_ex.png │ ├── 06_deepdream_ex.png │ ├── book_cover.jpg │ ├── image.png │ └── starry_night.jpg ├── 09_Recurrent_Neural_Networks ├── 01_Introduction │ └── readme.md ├── 02_Implementing_RNN_for_Spam_Prediction │ ├── 02_implementing_rnn.ipynb │ └── 02_implementing_rnn.py ├── 03_Implementing_LSTM │ ├── 03_implementing_lstm.ipynb │ └── 03_implementing_lstm.py ├── 04_Stacking_Multiple_LSTM_Layers │ ├── 04_stacking_multiple_lstm.ipynb │ └── 04_stacking_multiple_lstm.py ├── 05_Creating_A_Sequence_To_Sequence_Model │ ├── 05_seq2seq_translation.ipynb │ ├── 05_seq2seq_translation.py │ └── 05_translation_model_sample.py ├── 06_Training_A_Siamese_Similarity_Measure │ ├── 06_siamese_similarity_driver.ipynb │ ├── 06_siamese_similarity_driver.py │ └── siamese_similarity_model.py └── images │ ├── 01_RNN_Seq2Seq.png │ ├── 01_RNN_Single_Target.png │ ├── 02_RNN_Spam_Acc_Loss.png │ ├── 03_LSTM_Loss.png │ ├── 04_MultipleLSTM_Loss.png │ ├── 04_MultipleRNN_Architecture.png │ ├── 05_Seq2Seq_Loss.png │ ├── 06_Similarity_RNN.png │ ├── 06_Similarity_RNN_Architecture.png │ └── 06_Similarity_RNN_Diagram.png ├── 10_Taking_TensorFlow_to_Production ├── 01_Implementing_Unit_Tests │ └── 01_implementing_unit_tests.py ├── 02_Using_Multiple_Devices │ └── 02_using_multiple_devices.py ├── 03_Parallelizing_TensorFlow │ └── 03_parallelizing_tensorflow.py ├── 04_Production_Tips │ └── 04_production_tips_for_tf.py ├── 05_Production_Example │ ├── 05_production_ex_eval.py │ └── 05_production_ex_train.py ├── 06_Using_TensorFlow_Serving │ ├── 06_Using_TensorFlow_Serving_Client.py │ └── 06_Using_TensorFlow_Serving_Train.py └── images │ ├── file_structure.jpg │ └── image.png ├── 11_More_with_TensorFlow ├── 01_Visualizing_Computational_Graphs │ └── 01_using_tensorboard.py ├── 02_Working_with_a_Genetic_Algorithm │ └── 02_genetic_algorithm.py ├── 03_Clustering_Using_KMeans │ └── 03_k_means.py ├── 04_Solving_A_System_of_ODEs │ └── 04_solving_ode_system.py ├── 05_Using_a_Random_Forest │ └── 05_Using_a_Random_Forest.py ├── 06_Using_TensorFlow_with_Keras │ └── 06_Using_TensorFlow_with_Keras.py └── images │ ├── 01_tensorboard1.png │ ├── 01_tensorboard2.png │ ├── 01_tensorboard3.png │ ├── 02_genetic_algorithm.png │ ├── 03_kmeans.png │ └── 04_ode_system.png ├── LICENSE └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | # Standard python gitignore file 2 | *.csv 3 | !/06_Neural_Networks/08_Learning_Tic_Tac_Toe/base_tic_tac_toe_moves.csv 4 | *.en 5 | *.de 6 | *.tsv 7 | *.Rdata 8 | *.rdata 9 | *.ppt 10 | *.pptx 11 | *.docx 12 | *.xls 13 | *.xlsx 14 | *.gz 15 | .gitignore~ 16 | .Rhistory 17 | Readme.md~ 18 | readme.md~ 19 | LICENSE~ 20 | *.pkl 21 | *.ckpt 22 | checkpoint 23 | *.ckpt 24 | *.ckpy.meta 25 | *.pkl 26 | *.neg 27 | *.pos 28 | **/temp/ 29 | **/temp 30 | **/tmp/ 31 | **/tmp 32 | */tensorboard_logs/* 33 | */tensorboard_logs/ 34 | */tensorboard_logs 35 | ***/tensorboard_logs/* 36 | **/tensorboard_logs/ 37 | **/tensorboard_logs 38 | tensorboard_logs/ 39 | tensorboard_logs/* 40 | */tensorboard/* 41 | */tensorboard/ 42 | */tensorboard 
43 | ***/tensorboard/* 44 | **/tensorboard/ 45 | **/tensorboard 46 | tensorboard/ 47 | tensorboard/* 48 | temp_* 49 | 50 | **/12_Reinforcement_Learning/ 51 | **/12_Reinforcement_Learning 52 | **/12_Reinforcement_Learning/* 53 | 54 | # Byte-compiled / optimized / DLL files 55 | __pycache__/ 56 | *.py[cod] 57 | *$py.class 58 | *.py~ 59 | 60 | # C extensions 61 | *.so 62 | 63 | # Distribution / packaging 64 | .Python 65 | env/ 66 | build/ 67 | develop-eggs/ 68 | dist/ 69 | downloads/ 70 | eggs/ 71 | .eggs/ 72 | lib/ 73 | lib64/ 74 | parts/ 75 | sdist/ 76 | var/ 77 | *.egg-info/ 78 | .installed.cfg 79 | *.egg 80 | 81 | temp/ 82 | temp*/ 83 | temp_*.* 84 | 85 | 86 | # PyInstaller 87 | # Usually these files are written by a python script from a template 88 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 89 | *.manifest 90 | *.spec 91 | 92 | # Installer logs 93 | pip-log.txt 94 | pip-delete-this-directory.txt 95 | 96 | # Unit test / coverage reports 97 | htmlcov/ 98 | .tox/ 99 | .coverage 100 | .coverage.* 101 | .cache 102 | nosetests.xml 103 | coverage.xml 104 | *,cover 105 | .hypothesis/ 106 | 107 | # Translations 108 | *.mo 109 | *.pot 110 | 111 | # Django stuff: 112 | *.log 113 | local_settings.py 114 | 115 | # Flask stuff: 116 | instance/ 117 | .webassets-cache 118 | 119 | # Scrapy stuff: 120 | .scrapy 121 | 122 | # Sphinx documentation 123 | docs/_build/ 124 | 125 | # PyBuilder 126 | target/ 127 | 128 | # IPython Notebook 129 | .ipynb_checkpoints 130 | 131 | # pyenv 132 | .python-version 133 | 134 | # celery beat schedule file 135 | celerybeat-schedule 136 | 137 | # dotenv 138 | .env 139 | 140 | # virtualenv 141 | venv/ 142 | ENV/ 143 | 144 | # Spyder/pycharm project settings 145 | .spyderproject 146 | .idea/ 147 | 148 | # Rope project settings 149 | .ropeproject 150 | 151 | # Ignore data sources 152 | */MNIST_data/* 153 | */birthweight_data/* 154 | -------------------------------------------------------------------------------- /01_Introduction/02_Creating_and_Using_Tensors/02_tensors.py: -------------------------------------------------------------------------------- 1 | # Tensors 2 | #---------------------------------- 3 | # 4 | # This function introduces various ways to create 5 | # tensors in TensorFlow 6 | 7 | import tensorflow as tf 8 | from tensorflow.python.framework import ops 9 | ops.reset_default_graph() 10 | 11 | # Introduce tensors in tf 12 | 13 | # Get graph handle 14 | sess = tf.Session() 15 | 16 | my_tensor = tf.zeros([1,20]) 17 | 18 | # Declare a variable 19 | my_var = tf.Variable(tf.zeros([1,20])) 20 | 21 | # Different kinds of variables 22 | row_dim = 2 23 | col_dim = 3 24 | 25 | # Zero initialized variable 26 | zero_var = tf.Variable(tf.zeros([row_dim, col_dim])) 27 | 28 | # One initialized variable 29 | ones_var = tf.Variable(tf.ones([row_dim, col_dim])) 30 | 31 | # shaped like other variable 32 | sess.run(zero_var.initializer) 33 | sess.run(ones_var.initializer) 34 | zero_similar = tf.Variable(tf.zeros_like(zero_var)) 35 | ones_similar = tf.Variable(tf.ones_like(ones_var)) 36 | 37 | sess.run(ones_similar.initializer) 38 | sess.run(zero_similar.initializer) 39 | 40 | # Fill shape with a constant 41 | fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1)) 42 | 43 | # Create a variable from a constant 44 | const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9])) 45 | # This can also be used to fill an array: 46 | const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim])) 47 | 48 | # Sequence generation 49 | linear_var = 
tf.Variable(tf.linspace(start=0.0, stop=1.0, num=3)) # Generates [0.0, 0.5, 1.0]; includes the end
50 |
51 | sequence_var = tf.Variable(tf.range(start=6, limit=15, delta=3)) # Generates [6, 9, 12]; doesn't include the end
52 |
53 | # Random Numbers
54 |
55 | # Random Normal
56 | rnorm_var = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)
57 |
58 | # Add summaries to tensorboard
59 | merged = tf.summary.merge_all()
60 |
61 | # Initialize graph writer:
62 | writer = tf.summary.FileWriter("/tmp/variable_logs", graph=sess.graph)
63 |
64 | # Initialize operation
65 | initialize_op = tf.global_variables_initializer()
66 |
67 | # Run initialization of variable
68 | sess.run(initialize_op) -------------------------------------------------------------------------------- /01_Introduction/03_Using_Variables_and_Placeholders/03_placeholders.ipynb: -------------------------------------------------------------------------------- 1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "deletable": true,
7 | "editable": true
8 | },
9 | "source": [
10 | "# Placeholders\n",
11 | "\n",
12 | "We introduce how to use placeholders in TensorFlow.\n",
13 | "\n",
14 | "First we import the necessary libraries and reset the graph session."
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 1,
20 | "metadata": {
21 | "collapsed": false,
22 | "deletable": true,
23 | "editable": true
24 | },
25 | "outputs": [],
26 | "source": [
27 | "import numpy as np\n",
28 | "import tensorflow as tf\n",
29 | "from tensorflow.python.framework import ops\n",
30 | "ops.reset_default_graph()"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {
36 | "deletable": true,
37 | "editable": true
38 | },
39 | "source": [
40 | "Start graph session"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": 2,
46 | "metadata": {
47 | "collapsed": true,
48 | "deletable": true,
49 | "editable": true
50 | },
51 | "outputs": [],
52 | "source": [
53 | "sess = tf.Session()"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {
59 | "deletable": true,
60 | "editable": true
61 | },
62 | "source": [
63 | "### Declare a placeholder\n",
64 | "\n",
65 | "We declare a placeholder by using TensorFlow's function, `tf.placeholder()`, which accepts a data-type argument (`tf.float32`) and a shape argument, `(4,4)`. Note that the shape can be a tuple or a list, `[4,4]`."
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 3,
71 | "metadata": {
72 | "collapsed": true,
73 | "deletable": true,
74 | "editable": true
75 | },
76 | "outputs": [],
77 | "source": [
78 | "x = tf.placeholder(tf.float32, shape=(4, 4))"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "metadata": {
84 | "deletable": true,
85 | "editable": true
86 | },
87 | "source": [
88 | "To illustrate how to use the placeholder, we create input data for it and an operation we can visualize on Tensorboard.\n",
89 | "\n",
90 | "Note the usage of `feed_dict`, where we feed the value of x into the computational graph."
91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 4, 96 | "metadata": { 97 | "collapsed": false, 98 | "deletable": true, 99 | "editable": true 100 | }, 101 | "outputs": [ 102 | { 103 | "name": "stdout", 104 | "output_type": "stream", 105 | "text": [ 106 | "[[ 0.1175806 0.88121527 0.00815445 0.93555111]\n", 107 | " [ 0.97369134 0.14595009 0.16398087 0.76570976]\n", 108 | " [ 0.67633879 0.11748746 0.01266815 0.32564184]\n", 109 | " [ 0.99007022 0.6825515 0.54524553 0.01503101]]\n" 110 | ] 111 | } 112 | ], 113 | "source": [ 114 | "# Input data to placeholder, note that 'rand_array' and 'x' are the same shape.\n", 115 | "rand_array = np.random.rand(4, 4)\n", 116 | "\n", 117 | "# Create a Tensor to perform an operation (here, y will be equal to x, a 4x4 matrix)\n", 118 | "y = tf.identity(x)\n", 119 | "\n", 120 | "# Print the output, feeding the value of x into the computational graph\n", 121 | "print(sess.run(y, feed_dict={x: rand_array}))" 122 | ] 123 | }, 124 | { 125 | "cell_type": "markdown", 126 | "metadata": { 127 | "deletable": true, 128 | "editable": true 129 | }, 130 | "source": [ 131 | "To visualize this in Tensorboard, we merge summaries and write to a log file." 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": 8, 137 | "metadata": { 138 | "collapsed": false, 139 | "deletable": true, 140 | "editable": true 141 | }, 142 | "outputs": [], 143 | "source": [ 144 | "merged = tf.summary.merge_all()\n", 145 | "writer = tf.summary.FileWriter(\"/tmp/variable_logs\", sess.graph)" 146 | ] 147 | }, 148 | { 149 | "cell_type": "markdown", 150 | "metadata": { 151 | "deletable": true, 152 | "editable": true 153 | }, 154 | "source": [ 155 | "We run the following command at the prompt:\n", 156 | "\n", 157 | "`tensorboard --logdir=/tmp`\n", 158 | "\n", 159 | "Which will tell us where to navigate chrome to to visualize the computational graph. 
Default is\n", 160 | "\n", 161 | "`http://0.0.0.0:6006/`" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": { 167 | "deletable": true, 168 | "editable": true 169 | }, 170 | "source": [ 171 | "![Placeholders_in_Tensorboard](https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/images/03_placeholder.png)" 172 | ] 173 | } 174 | ], 175 | "metadata": { 176 | "kernelspec": { 177 | "display_name": "Python 3", 178 | "language": "python", 179 | "name": "python3" 180 | }, 181 | "language_info": { 182 | "codemirror_mode": { 183 | "name": "ipython", 184 | "version": 3 185 | }, 186 | "file_extension": ".py", 187 | "mimetype": "text/x-python", 188 | "name": "python", 189 | "nbconvert_exporter": "python", 190 | "pygments_lexer": "ipython3", 191 | "version": "3.5.2" 192 | } 193 | }, 194 | "nbformat": 4, 195 | "nbformat_minor": 2 196 | } 197 | -------------------------------------------------------------------------------- /01_Introduction/03_Using_Variables_and_Placeholders/03_placeholders.py: -------------------------------------------------------------------------------- 1 | # Placeholders 2 | #---------------------------------- 3 | # 4 | # This function introduces how to 5 | # use placeholders in TensorFlow 6 | 7 | import numpy as np 8 | import tensorflow as tf 9 | from tensorflow.python.framework import ops 10 | ops.reset_default_graph() 11 | 12 | # Using Placeholders 13 | sess = tf.Session() 14 | 15 | x = tf.placeholder(tf.float32, shape=(4, 4)) 16 | y = tf.identity(x) 17 | 18 | rand_array = np.random.rand(4, 4) 19 | 20 | merged = tf.summary.merge_all() 21 | 22 | writer = tf.summary.FileWriter("/tmp/variable_logs", sess.graph) 23 | 24 | print(sess.run(y, feed_dict={x: rand_array})) -------------------------------------------------------------------------------- /01_Introduction/04_Working_with_Matrices/04_matrices.py: -------------------------------------------------------------------------------- 1 | # Matrices and Matrix Operations 2 | #---------------------------------- 3 | # 4 | # This function introduces various ways to create 5 | # matrices and how to use them in TensorFlow 6 | 7 | import numpy as np 8 | import tensorflow as tf 9 | from tensorflow.python.framework import ops 10 | ops.reset_default_graph() 11 | 12 | # Declaring matrices 13 | sess = tf.Session() 14 | 15 | # Declaring matrices 16 | 17 | # Identity matrix 18 | identity_matrix = tf.diag([1.0,1.0,1.0]) 19 | print(sess.run(identity_matrix)) 20 | 21 | # 2x3 random norm matrix 22 | A = tf.truncated_normal([2,3]) 23 | print(sess.run(A)) 24 | 25 | # 2x3 constant matrix 26 | B = tf.fill([2,3], 5.0) 27 | print(sess.run(B)) 28 | 29 | # 3x2 random uniform matrix 30 | C = tf.random_uniform([3,2]) 31 | print(sess.run(C)) # Note that we are reinitializing, hence the new random variables 32 | 33 | # Create matrix from np array 34 | D = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]])) 35 | print(sess.run(D)) 36 | 37 | # Matrix addition/subtraction 38 | print(sess.run(A+B)) 39 | print(sess.run(B-B)) 40 | 41 | # Matrix Multiplication 42 | print(sess.run(tf.matmul(B, identity_matrix))) 43 | 44 | # Matrix Transpose 45 | print(sess.run(tf.transpose(C))) # Again, new random variables 46 | 47 | # Matrix Determinant 48 | print(sess.run(tf.matrix_determinant(D))) 49 | 50 | # Matrix Inverse 51 | print(sess.run(tf.matrix_inverse(D))) 52 | 53 | # Cholesky Decomposition 54 | print(sess.run(tf.cholesky(identity_matrix))) 55 | 56 | # Eigenvalues and Eigenvectors 57 | 
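# Note: tf.self_adjoint_eig() returns a tuple of (eigenvalues, eigenvectors)
# and assumes the input is self-adjoint (symmetric). D above is not symmetric,
# so TensorFlow reads only one triangular half of it; treat this call as an
# API illustration rather than a full eigendecomposition of D.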
print(sess.run(tf.self_adjoint_eig(D))) -------------------------------------------------------------------------------- /01_Introduction/05_Declaring_Operations/05_operations.py: -------------------------------------------------------------------------------- 1 | # Operations 2 | #---------------------------------- 3 | # 4 | # This function introduces various operations 5 | # in TensorFlow 6 | 7 | # Declaring Operations 8 | import tensorflow as tf 9 | from tensorflow.python.framework import ops 10 | ops.reset_default_graph() 11 | 12 | # Open graph session 13 | sess = tf.Session() 14 | 15 | # div() vs truediv() vs floordiv() 16 | print(sess.run(tf.div(3, 4))) 17 | print(sess.run(tf.truediv(3, 4))) 18 | print(sess.run(tf.floordiv(3.0, 4.0))) 19 | 20 | # Mod function 21 | print(sess.run(tf.mod(22.0, 5.0))) 22 | 23 | # Cross Product 24 | print(sess.run(tf.cross([1., 0., 0.], [0., 1., 0.]))) 25 | 26 | # Trig functions 27 | print(sess.run(tf.sin(3.1416))) 28 | print(sess.run(tf.cos(3.1416))) 29 | print(sess.run(tf.tan(3.1416/4.))) 30 | 31 | # Custom operation 32 | test_nums = range(15) 33 | 34 | 35 | def custom_polynomial(x_val): 36 | # Return 3x^2 - x + 10 37 | return tf.subtract(3 * tf.square(x_val), x_val) + 10 38 | 39 | print(sess.run(custom_polynomial(11))) 40 | 41 | # What should we get with list comprehension 42 | expected_output = [3*x*x-x+10 for x in test_nums] 43 | print(expected_output) 44 | 45 | # TensorFlow custom function output 46 | for num in test_nums: 47 | print(sess.run(custom_polynomial(num))) 48 | -------------------------------------------------------------------------------- /01_Introduction/06_Implementing_Activation_Functions/06_activation_functions.py: -------------------------------------------------------------------------------- 1 | # Activation Functions 2 | #---------------------------------- 3 | # 4 | # This function introduces activation 5 | # functions in TensorFlow 6 | 7 | # Implementing Activation Functions 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | import tensorflow as tf 11 | from tensorflow.python.framework import ops 12 | ops.reset_default_graph() 13 | 14 | # Open graph session 15 | sess = tf.Session() 16 | 17 | # X range 18 | x_vals = np.linspace(start=-10., stop=10., num=100) 19 | 20 | # ReLU activation 21 | print(sess.run(tf.nn.relu([-3., 3., 10.]))) 22 | y_relu = sess.run(tf.nn.relu(x_vals)) 23 | 24 | # ReLU-6 activation 25 | print(sess.run(tf.nn.relu6([-3., 3., 10.]))) 26 | y_relu6 = sess.run(tf.nn.relu6(x_vals)) 27 | 28 | # Sigmoid activation 29 | print(sess.run(tf.nn.sigmoid([-1., 0., 1.]))) 30 | y_sigmoid = sess.run(tf.nn.sigmoid(x_vals)) 31 | 32 | # Hyper Tangent activation 33 | print(sess.run(tf.nn.tanh([-1., 0., 1.]))) 34 | y_tanh = sess.run(tf.nn.tanh(x_vals)) 35 | 36 | # Softsign activation 37 | print(sess.run(tf.nn.softsign([-1., 0., 1.]))) 38 | y_softsign = sess.run(tf.nn.softsign(x_vals)) 39 | 40 | # Softplus activation 41 | print(sess.run(tf.nn.softplus([-1., 0., 1.]))) 42 | y_softplus = sess.run(tf.nn.softplus(x_vals)) 43 | 44 | # Exponential linear activation 45 | print(sess.run(tf.nn.elu([-1., 0., 1.]))) 46 | y_elu = sess.run(tf.nn.elu(x_vals)) 47 | 48 | # Plot the different functions 49 | plt.plot(x_vals, y_softplus, 'r--', label='Softplus', linewidth=2) 50 | plt.plot(x_vals, y_relu, 'b:', label='ReLU', linewidth=2) 51 | plt.plot(x_vals, y_relu6, 'g-.', label='ReLU6', linewidth=2) 52 | plt.plot(x_vals, y_elu, 'k-', label='ExpLU', linewidth=0.5) 53 | plt.ylim([-1.5,7]) 54 | plt.legend(loc='upper left') 55 | 
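# (First figure: the ReLU-family and softplus curves. Recall elu(x) = x for
# x >= 0 and exp(x) - 1 for x < 0, so ELU tracks ReLU for positive inputs but
# is smooth and bounded below by -1.)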
plt.show() 56 | 57 | plt.plot(x_vals, y_sigmoid, 'r--', label='Sigmoid', linewidth=2) 58 | plt.plot(x_vals, y_tanh, 'b:', label='Tanh', linewidth=2) 59 | plt.plot(x_vals, y_softsign, 'g-.', label='Softsign', linewidth=2) 60 | plt.ylim([-2,2]) 61 | plt.legend(loc='upper left') 62 | plt.show() 63 | -------------------------------------------------------------------------------- /01_Introduction/07_Working_with_Data_Sources/07_data_gathering.py: -------------------------------------------------------------------------------- 1 | # Data gathering 2 | #---------------------------------- 3 | # 4 | # This function gives us the ways to access 5 | # the various data sets we will need 6 | 7 | # Data Gathering 8 | import matplotlib.pyplot as plt 9 | import tensorflow as tf 10 | from tensorflow.python.framework import ops 11 | ops.reset_default_graph() 12 | 13 | 14 | # Iris Data 15 | from sklearn import datasets 16 | 17 | iris = datasets.load_iris() 18 | print(len(iris.data)) 19 | print(len(iris.target)) 20 | print(iris.data[0]) 21 | print(set(iris.target)) 22 | 23 | # Low Birthrate Data 24 | import requests 25 | 26 | birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat' 27 | birth_file = requests.get(birthdata_url) 28 | birth_data = birth_file.text.split('\r\n') 29 | birth_header = birth_data[0].split('\t') 30 | birth_data = [[float(x) for x in y.split('\t') if len(x)>=1] for y in birth_data[1:] if len(y)>=1] 31 | print(len(birth_data)) 32 | print(len(birth_data[0])) 33 | 34 | 35 | # Housing Price Data 36 | import requests 37 | 38 | housing_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data' 39 | housing_header = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV'] 40 | housing_file = requests.get(housing_url) 41 | housing_data = [[float(x) for x in y.split(' ') if len(x)>=1] for y in housing_file.text.split('\n') if len(y)>=1] 42 | print(len(housing_data)) 43 | print(len(housing_data[0])) 44 | 45 | 46 | # MNIST Handwriting Data 47 | from tensorflow.examples.tutorials.mnist import input_data 48 | 49 | mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 50 | print(len(mnist.train.images)) 51 | print(len(mnist.test.images)) 52 | print(len(mnist.validation.images)) 53 | print(mnist.train.labels[1,:]) 54 | 55 | # CIFAR-10 Image Category Dataset 56 | # The CIFAR-10 data ( https://www.cs.toronto.edu/~kriz/cifar.html ) contains 60,000 32x32 color images of 10 classes. 57 | # It was collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. 58 | # Alex Krizhevsky maintains the page referenced here. 59 | # This is such a common dataset, that there are built in functions in TensorFlow to access this data. 60 | 61 | # Running this command requires an internet connection and a few minutes to download all the images. 
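# Note: tf.contrib was removed in TensorFlow 2.x. On recent 1.x releases the
# same dataset is also exposed as tf.keras.datasets.cifar10.load_data(),
# which returns the same (X_train, y_train), (X_test, y_test) tuples.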
62 | (X_train, y_train), (X_test, y_test) = tf.contrib.keras.datasets.cifar10.load_data() 63 | 64 | print(X_train.shape) 65 | print(y_train.shape) 66 | print(y_train[0,]) # this is a frog 67 | 68 | # Plot the 0-th image (a frog) 69 | from PIL import Image 70 | img = Image.fromarray(X_train[0,:,:,:]) 71 | plt.imshow(img) 72 | 73 | 74 | # Ham/Spam Text Data 75 | import requests 76 | import io 77 | from zipfile import ZipFile 78 | 79 | # Get/read zip file 80 | zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip' 81 | r = requests.get(zip_url) 82 | z = ZipFile(io.BytesIO(r.content)) 83 | file = z.read('SMSSpamCollection') 84 | # Format Data 85 | text_data = file.decode() 86 | text_data = text_data.encode('ascii',errors='ignore') 87 | text_data = text_data.decode().split('\n') 88 | text_data = [x.split('\t') for x in text_data if len(x)>=1] 89 | [text_data_target, text_data_train] = [list(x) for x in zip(*text_data)] 90 | print(len(text_data_train)) 91 | print(set(text_data_target)) 92 | print(text_data_train[1]) 93 | 94 | 95 | # Movie Review Data 96 | import requests 97 | import io 98 | import tarfile 99 | 100 | movie_data_url = 'http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz' 101 | r = requests.get(movie_data_url) 102 | # Stream data into temp object 103 | stream_data = io.BytesIO(r.content) 104 | tmp = io.BytesIO() 105 | while True: 106 | s = stream_data.read(16384) 107 | if not s: 108 | break 109 | tmp.write(s) 110 | stream_data.close() 111 | tmp.seek(0) 112 | # Extract tar file 113 | tar_file = tarfile.open(fileobj=tmp, mode="r:gz") 114 | pos = tar_file.extractfile('rt-polaritydata/rt-polarity.pos') 115 | neg = tar_file.extractfile('rt-polaritydata/rt-polarity.neg') 116 | # Save pos/neg reviews 117 | pos_data = [] 118 | for line in pos: 119 | pos_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode()) 120 | neg_data = [] 121 | for line in neg: 122 | neg_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode()) 123 | tar_file.close() 124 | 125 | print(len(pos_data)) 126 | print(len(neg_data)) 127 | print(neg_data[0]) 128 | 129 | 130 | # The Works of Shakespeare Data 131 | import requests 132 | 133 | shakespeare_url = 'http://www.gutenberg.org/cache/epub/100/pg100.txt' 134 | # Get Shakespeare text 135 | response = requests.get(shakespeare_url) 136 | shakespeare_file = response.content 137 | # Decode binary into string 138 | shakespeare_text = shakespeare_file.decode('utf-8') 139 | # Drop first few descriptive paragraphs. 
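# (The fixed 7675-character offset on the next line assumes one particular
# copy of pg100.txt. A more robust sketch, assuming the text's first work is
# titled 'THE SONNETS' (a hypothetical marker), would locate the start instead:
# start_idx = shakespeare_text.find('THE SONNETS')
# shakespeare_text = shakespeare_text[start_idx:] )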
140 | shakespeare_text = shakespeare_text[7675:] 141 | print(len(shakespeare_text)) 142 | 143 | 144 | # English-German Sentence Translation Data 145 | import requests 146 | import io 147 | from zipfile import ZipFile 148 | sentence_url = 'http://www.manythings.org/anki/deu-eng.zip' 149 | r = requests.get(sentence_url) 150 | z = ZipFile(io.BytesIO(r.content)) 151 | file = z.read('deu.txt') 152 | # Format Data 153 | eng_ger_data = file.decode() 154 | eng_ger_data = eng_ger_data.encode('ascii',errors='ignore') 155 | eng_ger_data = eng_ger_data.decode().split('\n') 156 | eng_ger_data = [x.split('\t') for x in eng_ger_data if len(x)>=1] 157 | [english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)] 158 | print(len(english_sentence)) 159 | print(len(german_sentence)) 160 | print(eng_ger_data[10]) 161 | -------------------------------------------------------------------------------- /01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat: -------------------------------------------------------------------------------- 1 | LOW AGE LWT RACE SMOKE PTL HT UI BWT 2 | 1 28 113 1 1 1 0 1 709 3 | 1 29 130 0 0 0 0 1 1021 4 | 1 34 187 1 1 0 1 0 1135 5 | 1 25 105 1 0 1 1 0 1330 6 | 1 25 85 1 0 0 0 1 1474 7 | 1 27 150 1 0 0 0 0 1588 8 | 1 23 97 1 0 0 0 1 1588 9 | 1 24 128 1 0 1 0 0 1701 10 | 1 24 132 1 0 0 1 0 1729 11 | 1 21 165 0 1 0 1 0 1790 12 | 1 32 105 1 1 0 0 0 1818 13 | 1 19 91 0 1 1 0 1 1885 14 | 1 25 115 1 0 0 0 0 1893 15 | 1 16 130 1 0 0 0 0 1899 16 | 1 25 92 0 1 0 0 0 1928 17 | 1 20 150 0 1 0 0 0 1928 18 | 1 21 190 1 0 0 0 1 1928 19 | 1 24 155 0 1 1 0 0 1936 20 | 1 21 103 1 0 0 0 0 1970 21 | 1 20 125 1 0 0 0 1 2055 22 | 1 25 89 1 0 1 0 0 2055 23 | 1 19 102 0 0 0 0 0 2082 24 | 1 19 112 0 1 0 0 1 2084 25 | 1 26 117 0 1 1 0 1 2084 26 | 1 24 138 0 0 0 0 0 2100 27 | 1 17 130 1 1 1 0 1 2125 28 | 1 20 120 1 1 0 0 0 2126 29 | 1 22 130 0 1 1 0 1 2187 30 | 1 27 130 1 0 0 0 1 2187 31 | 1 20 80 1 1 0 0 1 2211 32 | 1 17 110 0 1 0 0 0 2225 33 | 1 25 105 1 0 1 0 0 2240 34 | 1 20 109 1 0 0 0 0 2240 35 | 1 18 148 1 0 0 0 0 2282 36 | 1 18 110 1 1 1 0 0 2296 37 | 1 20 121 0 1 1 0 1 2296 38 | 1 21 100 1 0 1 0 0 2301 39 | 1 26 96 1 0 0 0 0 2325 40 | 1 31 102 0 1 1 0 0 2353 41 | 1 15 110 0 0 0 0 0 2353 42 | 1 23 187 1 1 0 0 0 2367 43 | 1 20 122 1 1 1 0 0 2381 44 | 1 24 105 1 1 0 0 0 2381 45 | 1 15 115 1 0 0 0 1 2381 46 | 1 23 120 1 0 0 0 0 2395 47 | 1 30 142 0 1 1 0 0 2410 48 | 1 22 130 0 1 0 0 0 2410 49 | 1 17 120 0 1 0 0 0 2414 50 | 1 23 110 0 1 1 0 0 2424 51 | 1 17 120 1 0 0 0 0 2438 52 | 1 26 154 1 0 1 1 0 2442 53 | 1 20 105 1 0 0 0 0 2450 54 | 1 26 168 0 1 0 0 0 2466 55 | 1 14 101 1 1 1 0 0 2466 56 | 1 28 95 0 1 0 0 0 2466 57 | 1 14 100 1 0 0 0 0 2495 58 | 1 23 94 1 1 0 0 0 2495 59 | 1 17 142 1 0 0 1 0 2495 60 | 1 21 130 0 1 0 1 0 2495 61 | 0 19 182 1 0 0 0 1 2523 62 | 0 33 155 1 0 0 0 0 2551 63 | 0 20 105 0 1 0 0 0 2557 64 | 0 21 108 0 1 0 0 1 2594 65 | 0 18 107 0 1 0 0 1 2600 66 | 0 21 124 1 0 0 0 0 2622 67 | 0 22 118 0 0 0 0 0 2637 68 | 0 17 103 1 0 0 0 0 2637 69 | 0 29 123 0 1 0 0 0 2663 70 | 0 26 113 0 1 0 0 0 2665 71 | 0 19 95 1 0 0 0 0 2722 72 | 0 19 150 1 0 0 0 0 2733 73 | 0 22 95 1 0 0 1 0 2750 74 | 0 30 107 1 0 1 0 1 2750 75 | 0 18 100 0 1 0 0 0 2769 76 | 0 18 100 1 1 0 0 0 2769 77 | 0 15 98 1 0 0 0 0 2778 78 | 0 25 118 0 1 0 0 0 2782 79 | 0 20 120 1 0 0 0 1 2807 80 | 0 28 120 0 1 0 0 0 2821 81 | 0 32 121 1 0 0 0 0 2835 82 | 0 31 100 0 0 0 0 1 2835 83 | 0 36 202 0 0 0 0 0 2836 84 | 0 28 120 1 0 0 0 0 2863 85 | 0 25 120 1 0 0 0 1 2877 86 | 0 28 167 0 0 0 0 0 2877 
87 | 0 17 122 0 1 0 0 0 2906 88 | 0 29 150 0 0 0 0 0 2920 89 | 0 26 168 1 1 0 0 0 2920 90 | 0 17 113 1 0 0 0 0 2920 91 | 0 17 113 1 0 0 0 0 2920 92 | 0 24 90 0 1 1 0 0 2948 93 | 0 35 121 1 1 1 1 0 2948 94 | 0 25 155 0 1 1 0 0 2977 95 | 0 25 125 1 0 0 0 0 2977 96 | 0 29 140 0 1 0 0 0 2977 97 | 0 19 138 0 1 0 1 0 2977 98 | 0 27 124 0 1 0 0 0 2992 99 | 0 31 115 0 1 0 0 0 3005 100 | 0 33 109 0 1 0 0 0 3033 101 | 0 21 185 1 1 0 0 0 3042 102 | 0 19 189 0 0 0 0 0 3062 103 | 0 23 130 1 0 0 0 0 3062 104 | 0 21 160 0 0 0 0 0 3062 105 | 0 18 90 0 1 0 0 1 3076 106 | 0 18 90 0 1 0 0 1 3076 107 | 0 32 132 0 0 0 0 0 3080 108 | 0 19 132 1 0 0 0 0 3090 109 | 0 24 115 0 0 0 0 0 3090 110 | 0 22 85 1 1 0 0 0 3090 111 | 0 22 120 0 0 0 1 0 3100 112 | 0 23 128 1 0 0 0 0 3104 113 | 0 22 130 0 1 0 0 0 3132 114 | 0 30 95 0 1 0 0 0 3147 115 | 0 19 115 1 0 0 0 0 3175 116 | 0 16 110 1 0 0 0 0 3175 117 | 0 21 110 1 1 0 0 1 3203 118 | 0 30 153 1 0 0 0 0 3203 119 | 0 20 103 1 0 0 0 0 3203 120 | 0 17 119 1 0 0 0 0 3225 121 | 0 17 119 1 0 0 0 0 3225 122 | 0 23 119 1 0 0 0 0 3232 123 | 0 24 110 1 0 0 0 0 3232 124 | 0 28 140 0 0 0 0 0 3234 125 | 0 26 133 1 1 0 0 0 3260 126 | 0 20 169 1 0 1 0 1 3274 127 | 0 24 115 1 0 0 0 0 3274 128 | 0 28 250 1 1 0 0 0 3303 129 | 0 20 141 0 0 0 0 1 3317 130 | 0 22 158 1 0 1 0 0 3317 131 | 0 22 112 0 1 1 0 0 3317 132 | 0 31 150 1 1 0 0 0 3321 133 | 0 23 115 1 1 0 0 0 3331 134 | 0 16 112 1 0 0 0 0 3374 135 | 0 16 135 0 1 0 0 0 3374 136 | 0 18 229 1 0 0 0 0 3402 137 | 0 25 140 0 0 0 0 0 3416 138 | 0 32 134 0 1 1 0 0 3430 139 | 0 20 121 1 1 0 0 0 3444 140 | 0 23 190 0 0 0 0 0 3459 141 | 0 22 131 0 0 0 0 0 3460 142 | 0 32 170 0 0 0 0 0 3473 143 | 0 30 110 1 0 0 0 0 3475 144 | 0 20 127 1 0 0 0 0 3487 145 | 0 23 123 1 0 0 0 0 3544 146 | 0 17 120 1 1 0 0 0 3572 147 | 0 19 105 1 0 0 0 0 3572 148 | 0 23 130 0 0 0 0 0 3586 149 | 0 36 175 0 0 0 0 0 3600 150 | 0 22 125 0 0 0 0 0 3614 151 | 0 24 133 0 0 0 0 0 3614 152 | 0 21 134 1 0 0 0 0 3629 153 | 0 19 235 0 1 0 1 0 3629 154 | 0 25 200 0 0 1 0 1 3637 155 | 0 16 135 0 1 0 0 0 3643 156 | 0 29 135 0 0 0 0 0 3651 157 | 0 29 154 0 0 0 0 0 3651 158 | 0 19 147 0 1 0 0 0 3651 159 | 0 19 147 0 1 0 0 0 3651 160 | 0 30 137 0 0 0 0 0 3699 161 | 0 24 110 0 0 0 0 0 3728 162 | 0 19 184 0 1 0 1 0 3756 163 | 0 24 110 0 0 1 0 0 3770 164 | 0 23 110 0 0 0 0 0 3770 165 | 0 20 120 1 0 0 0 0 3770 166 | 0 25 141 0 0 0 1 0 3790 167 | 0 30 112 0 0 0 0 0 3799 168 | 0 22 169 0 0 0 0 0 3827 169 | 0 18 120 0 1 0 0 0 3856 170 | 0 16 170 1 0 0 0 0 3860 171 | 0 32 186 0 0 0 0 0 3860 172 | 0 18 120 1 0 0 0 0 3884 173 | 0 29 130 0 1 0 0 0 3884 174 | 0 33 117 0 0 0 0 1 3912 175 | 0 20 170 0 1 0 0 0 3940 176 | 0 28 134 1 0 0 0 0 3941 177 | 0 14 135 0 0 1 0 0 3941 178 | 0 28 130 1 0 0 0 0 3969 179 | 0 25 120 0 0 0 0 0 3983 180 | 0 16 135 1 0 0 0 0 3997 181 | 0 20 158 0 0 0 0 0 3997 182 | 0 26 160 0 0 0 0 0 4054 183 | 0 21 115 0 0 0 0 0 4054 184 | 0 22 129 0 0 0 0 0 4111 185 | 0 25 130 0 0 0 0 0 4153 186 | 0 31 120 0 0 0 0 0 4167 187 | 0 35 170 0 0 1 0 0 4174 188 | 0 19 120 0 1 0 1 0 4238 189 | 0 24 216 0 0 0 0 0 4593 190 | 0 45 123 0 0 1 0 0 4990 191 | 192 | -------------------------------------------------------------------------------- /01_Introduction/images/01_outline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/01_Introduction/images/01_outline.png 
-------------------------------------------------------------------------------- /01_Introduction/images/02_variable.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/01_Introduction/images/02_variable.png -------------------------------------------------------------------------------- /01_Introduction/images/03_placeholder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/01_Introduction/images/03_placeholder.png -------------------------------------------------------------------------------- /01_Introduction/images/06_activation_funs1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/01_Introduction/images/06_activation_funs1.png -------------------------------------------------------------------------------- /01_Introduction/images/06_activation_funs2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/01_Introduction/images/06_activation_funs2.png -------------------------------------------------------------------------------- /02_TensorFlow_Way/01_Operations_as_a_Computational_Graph/01_operations_on_a_graph.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "deletable": true, 7 | "editable": true 8 | }, 9 | "source": [ 10 | "# Operations on a Computational Graph\n", 11 | "\n", 12 | "We start by loading the necessary libraries and resetting the computational graph." 
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": 2,
18 | "metadata": {
19 | "collapsed": true,
20 | "deletable": true,
21 | "editable": true
22 | },
23 | "outputs": [],
24 | "source": [
25 | "import os\n",
26 | "import matplotlib.pyplot as plt\n",
27 | "import numpy as np\n",
28 | "import tensorflow as tf\n",
29 | "from tensorflow.python.framework import ops\n",
30 | "ops.reset_default_graph()"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {
36 | "deletable": true,
37 | "editable": true
38 | },
39 | "source": [
40 | "### Start a graph session"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": 3,
46 | "metadata": {
47 | "collapsed": true,
48 | "deletable": true,
49 | "editable": true
50 | },
51 | "outputs": [],
52 | "source": [
53 | "sess = tf.Session()"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {
59 | "deletable": true,
60 | "editable": true
61 | },
62 | "source": [
63 | "### Create tensors"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": 4,
69 | "metadata": {
70 | "collapsed": true,
71 | "deletable": true,
72 | "editable": true
73 | },
74 | "outputs": [],
75 | "source": [
76 | "# Create data to feed in the placeholder\n",
77 | "x_vals = np.array([1., 3., 5., 7., 9.])\n",
78 | "\n",
79 | "# Create the TensorFlow placeholder\n",
80 | "x_data = tf.placeholder(tf.float32)\n",
81 | "\n",
82 | "# Constant for multiplication\n",
83 | "m = tf.constant(3.)"
84 | ]
85 | },
86 | {
87 | "cell_type": "markdown",
88 | "metadata": {
89 | "deletable": true,
90 | "editable": true
91 | },
92 | "source": [
93 | "We loop through the input values and print out the multiplication operation for each input."
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": 5,
99 | "metadata": {
100 | "collapsed": false,
101 | "deletable": true,
102 | "editable": true
103 | },
104 | "outputs": [
105 | {
106 | "name": "stdout",
107 | "output_type": "stream",
108 | "text": [
109 | "3.0\n",
110 | "9.0\n",
111 | "15.0\n",
112 | "21.0\n",
113 | "27.0\n"
114 | ]
115 | }
116 | ],
117 | "source": [
118 | "# Multiplication\n",
119 | "prod = tf.multiply(x_data, m)\n",
120 | "for x_val in x_vals:\n",
121 | "    print(sess.run(prod, feed_dict={x_data: x_val}))"
122 | ]
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "metadata": {
127 | "deletable": true,
128 | "editable": true
129 | },
130 | "source": [
131 | "### Output graph to Tensorboard"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": 6,
137 | "metadata": {
138 | "collapsed": false,
139 | "deletable": true,
140 | "editable": true
141 | },
142 | "outputs": [],
143 | "source": [
144 | "merged = tf.summary.merge_all(key='summaries')\n",
145 | "if not os.path.exists('tensorboard_logs/'):\n",
146 | "    os.makedirs('tensorboard_logs/')\n",
147 | "\n",
148 | "my_writer = tf.summary.FileWriter('tensorboard_logs/', sess.graph)"
149 | ]
150 | },
151 | {
152 | "cell_type": "markdown",
153 | "metadata": {
154 | "deletable": true,
155 | "editable": true
156 | },
157 | "source": [
158 | "![Operations on a Graph](https://github.com/nfmcclure/tensorflow_cookbook/raw/master/02_TensorFlow_Way/images/01_Operations_on_a_Graph.png)"
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": null,
164 | "metadata": {
165 | "collapsed": true,
166 | "deletable": true,
167 | "editable": true
168 | },
169 | "outputs": [],
170 | "source": []
171 | }
172 | ],
173 | "metadata": {
174 | "kernelspec": {
175 | "display_name": 
"Python 3", 176 | "language": "python", 177 | "name": "python3" 178 | }, 179 | "language_info": { 180 | "codemirror_mode": { 181 | "name": "ipython", 182 | "version": 3 183 | }, 184 | "file_extension": ".py", 185 | "mimetype": "text/x-python", 186 | "name": "python", 187 | "nbconvert_exporter": "python", 188 | "pygments_lexer": "ipython3", 189 | "version": "3.6.0" 190 | } 191 | }, 192 | "nbformat": 4, 193 | "nbformat_minor": 2 194 | } 195 | -------------------------------------------------------------------------------- /02_TensorFlow_Way/01_Operations_as_a_Computational_Graph/01_operations_on_a_graph.py: -------------------------------------------------------------------------------- 1 | # Operations on a Computational Graph 2 | import os 3 | import numpy as np 4 | import tensorflow as tf 5 | from tensorflow.python.framework import ops 6 | ops.reset_default_graph() 7 | 8 | # Create graph 9 | sess = tf.Session() 10 | 11 | # Create tensors 12 | 13 | # Create data to feed in 14 | x_vals = np.array([1., 3., 5., 7., 9.]) 15 | x_data = tf.placeholder(tf.float32) 16 | m_const = tf.constant(3.) 17 | 18 | # Multiplication 19 | my_product = tf.multiply(x_data, m_const) 20 | for x_val in x_vals: 21 | print(sess.run(my_product, feed_dict={x_data: x_val})) 22 | 23 | # View the tensorboard graph by running the following code and then 24 | # going to the terminal and typing: 25 | # $ tensorboard --logdir=tensorboard_logs 26 | merged = tf.summary.merge_all() 27 | if not os.path.exists('tensorboard_logs/'): 28 | os.makedirs('tensorboard_logs/') 29 | 30 | my_writer = tf.summary.FileWriter('tensorboard_logs/', sess.graph) 31 | -------------------------------------------------------------------------------- /02_TensorFlow_Way/02_Layering_Nested_Operations/02_layering_nested_operations.py: -------------------------------------------------------------------------------- 1 | # Layering Nested Operations 2 | 3 | import os 4 | import numpy as np 5 | import tensorflow as tf 6 | from tensorflow.python.framework import ops 7 | ops.reset_default_graph() 8 | 9 | # Start a graph session 10 | sess = tf.Session() 11 | 12 | # Create the data and variables 13 | my_array = np.array([[1., 3., 5., 7., 9.], 14 | [-2., 0., 2., 4., 6.], 15 | [-6., -3., 0., 3., 6.]]) 16 | x_vals = np.array([my_array, my_array + 1]) 17 | x_data = tf.placeholder(tf.float32, shape=(3, 5)) 18 | 19 | # Constants for matrix multiplication: 20 | m1 = tf.constant([[1.], [0.], [-1.], [2.], [4.]]) 21 | m2 = tf.constant([[2.]]) 22 | a1 = tf.constant([[10.]]) 23 | 24 | # Create our multiple operations 25 | prod1 = tf.matmul(x_data, m1) 26 | prod2 = tf.matmul(prod1, m2) 27 | add1 = tf.add(prod2, a1) 28 | 29 | # Now feed data through placeholder and print results 30 | for x_val in x_vals: 31 | print(sess.run(add1, feed_dict={x_data: x_val})) 32 | 33 | # View the tensorboard graph by running the following code and then 34 | # going to the terminal and typing: 35 | # $ tensorboard --logdir=tensorboard_logs 36 | merged = tf.summary.merge_all() 37 | if not os.path.exists('tensorboard_logs/'): 38 | os.makedirs('tensorboard_logs/') 39 | 40 | my_writer = tf.summary.FileWriter('tensorboard_logs/', sess.graph) 41 | -------------------------------------------------------------------------------- /02_TensorFlow_Way/03_Working_with_Multiple_Layers/03_multiple_layers.py: -------------------------------------------------------------------------------- 1 | # Working with Multiple Layers 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | import os 6 | from 
tensorflow.python.framework import ops
7 | ops.reset_default_graph()
8 |
9 | # Create graph
10 | sess = tf.Session()
11 |
12 | # Create tensors
13 |
14 | # Create a small random 'image' of size 4x4
15 | x_shape = [1, 4, 4, 1]
16 | x_val = np.random.uniform(size=x_shape)
17 |
18 | x_data = tf.placeholder(tf.float32, shape=x_shape)
19 |
20 | # Create a layer that takes a spatial moving window average
21 | # Our window will be 2x2 with a stride of 2 for height and width
22 | # The filter value will be 0.25 because we want the average of the 2x2 window
23 | my_filter = tf.constant(0.25, shape=[2, 2, 1, 1])
24 | my_strides = [1, 2, 2, 1]
25 | mov_avg_layer = tf.nn.conv2d(x_data, my_filter, my_strides,
26 |                              padding='SAME', name='Moving_Avg_Window')
27 |
28 | # Define a custom layer which will be sigmoid(Ax+b) where
29 | # x is a 2x2 matrix and A and b are 2x2 matrices
30 | def custom_layer(input_matrix):
31 |     input_matrix_squeezed = tf.squeeze(input_matrix)
32 |     A = tf.constant([[1., 2.], [-1., 3.]])
33 |     b = tf.constant(1., shape=[2, 2])
34 |     temp1 = tf.matmul(A, input_matrix_squeezed)
35 |     temp = tf.add(temp1, b)  # Ax + b
36 |     return tf.sigmoid(temp)
37 |
38 | # Add custom layer to graph
39 | with tf.name_scope('Custom_Layer') as scope:
40 |     custom_layer1 = custom_layer(mov_avg_layer)
41 |
42 | # The output should be an array that is 2x2, but size (1,2,2,1)
43 | print(sess.run(mov_avg_layer, feed_dict={x_data: x_val}))
44 |
45 | # After custom operation, size is now 2x2 (squeezed out size 1 dims)
46 | print(sess.run(custom_layer1, feed_dict={x_data: x_val}))
47 |
48 | merged = tf.summary.merge_all(key='summaries')
49 |
50 | if not os.path.exists('tensorboard_logs/'):
51 |     os.makedirs('tensorboard_logs/')
52 |
53 | my_writer = tf.summary.FileWriter('tensorboard_logs/', sess.graph)
54 | -------------------------------------------------------------------------------- /02_TensorFlow_Way/04_Implementing_Loss_Functions/04_loss_functions.py: -------------------------------------------------------------------------------- 1 | # Loss Functions
2 | #----------------------------------
3 | #
4 | # This python script illustrates the different
5 | # loss functions for regression and classification.
6 |
7 | import matplotlib.pyplot as plt
8 | import tensorflow as tf
9 | from tensorflow.python.framework import ops
10 | ops.reset_default_graph()
11 |
12 | # Create graph
13 | sess = tf.Session()
14 |
15 | ###### Numerical Predictions ######
16 | x_vals = tf.linspace(-1., 1., 500)
17 | target = tf.constant(0.)
18 |
19 | # L2 loss
20 | # L = (pred - actual)^2
21 | l2_y_vals = tf.square(target - x_vals)
22 | l2_y_out = sess.run(l2_y_vals)
23 |
24 | # L1 loss
25 | # L = abs(pred - actual)
26 | l1_y_vals = tf.abs(target - x_vals)
27 | l1_y_out = sess.run(l1_y_vals)
28 |
29 | # Pseudo-Huber loss
30 | # L = delta^2 * (sqrt(1 + ((pred - actual)/delta)^2) - 1)
31 | delta1 = tf.constant(0.25)
32 | phuber1_y_vals = tf.multiply(tf.square(delta1), tf.sqrt(1. + tf.square((target - x_vals)/delta1)) - 1.)
33 | phuber1_y_out = sess.run(phuber1_y_vals)
34 |
35 | delta2 = tf.constant(5.)
36 | phuber2_y_vals = tf.multiply(tf.square(delta2), tf.sqrt(1. + tf.square((target - x_vals)/delta2)) - 1.)
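# (Why two deltas: for |pred - actual| much smaller than delta, the pseudo-Huber
# loss behaves like the quadratic (pred - actual)^2 / 2, and for residuals much
# larger than delta it grows roughly like delta * |pred - actual|, so it is
# smooth like L2 near zero yet outlier-robust like L1.)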
37 | phuber2_y_out = sess.run(phuber2_y_vals)
38 |
39 | # Plot the output:
40 | x_array = sess.run(x_vals)
41 | plt.plot(x_array, l2_y_out, 'b-', label='L2 Loss')
42 | plt.plot(x_array, l1_y_out, 'r--', label='L1 Loss')
43 | plt.plot(x_array, phuber1_y_out, 'k-.', label='P-Huber Loss (0.25)')
44 | plt.plot(x_array, phuber2_y_out, 'g:', label='P-Huber Loss (5.0)')
45 | plt.ylim(-0.2, 0.4)
46 | plt.legend(loc='lower right', prop={'size': 11})
47 | plt.grid()
48 | plt.show()
49 |
50 |
51 | ###### Categorical Predictions ######
52 | x_vals = tf.linspace(-3., 5., 500)
53 | target = tf.constant(1.)
54 | targets = tf.fill([500,], 1.)
55 |
56 | # Hinge loss
57 | # Use for predicting binary (-1, 1) classes
58 | # L = max(0, 1 - (pred * actual))
59 | hinge_y_vals = tf.maximum(0., 1. - tf.multiply(target, x_vals))
60 | hinge_y_out = sess.run(hinge_y_vals)
61 |
62 | # Cross entropy loss
63 | # L = -actual * (log(pred)) - (1-actual)(log(1-pred))
64 | xentropy_y_vals = - tf.multiply(target, tf.log(x_vals)) - tf.multiply((1. - target), tf.log(1. - x_vals))
65 | xentropy_y_out = sess.run(xentropy_y_vals)
66 |
67 | # L = -actual * (log(sigmoid(pred))) - (1-actual)(log(1-sigmoid(pred)))
68 | # or, in the numerically stable form (with pred = logits):
69 | # L = max(pred, 0) - pred * actual + log(1 + exp(-abs(pred)))
70 | x_val_input = tf.expand_dims(x_vals, 1)
71 | target_input = tf.expand_dims(targets, 1)
72 | xentropy_sigmoid_y_vals = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_val_input,
73 |                                                                   labels=target_input)
74 | xentropy_sigmoid_y_out = sess.run(xentropy_sigmoid_y_vals)
75 |
76 | # Weighted (sigmoid) cross entropy loss
77 | # L = -actual * (log(sigmoid(pred))) * weight - (1-actual)(log(1-sigmoid(pred)))
78 | # or
79 | # L = (1 - actual) * pred + (1 + (weight - 1) * actual) * log(1 + exp(-pred))
80 | weight = tf.constant(0.5)
81 | xentropy_weighted_y_vals = tf.nn.weighted_cross_entropy_with_logits(logits=x_vals,
82 |                                                                     targets=targets,
83 |                                                                     pos_weight=weight)
84 | xentropy_weighted_y_out = sess.run(xentropy_weighted_y_vals)
85 |
86 | # Plot the output
87 | x_array = sess.run(x_vals)
88 | plt.plot(x_array, hinge_y_out, 'b-', label='Hinge Loss')
89 | plt.plot(x_array, xentropy_y_out, 'r--', label='Cross Entropy Loss')
90 | plt.plot(x_array, xentropy_sigmoid_y_out, 'k-.', label='Cross Entropy Sigmoid Loss')
91 | plt.plot(x_array, xentropy_weighted_y_out, 'g:', label='Weighted Cross Entropy Loss (x0.5)')
92 | plt.ylim(-1.5, 3)
93 | #plt.xlim(-1, 3)
94 | plt.grid()
95 | plt.legend(loc='lower right', prop={'size': 11})
96 | plt.show()
97 |
98 | # Softmax cross entropy loss
99 | # L = -sum(actual * log(softmax(pred)))
100 | unscaled_logits = tf.constant([[1., -3., 10.]])
101 | target_dist = tf.constant([[0.1, 0.02, 0.88]])
102 | softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=unscaled_logits,
103 |                                                               labels=target_dist)
104 | print(sess.run(softmax_xentropy))
105 |
106 | # Sparse softmax cross entropy loss
107 | # Use when the classes are mutually exclusive and targets are integer class indices
108 | # L = -log(softmax(pred)[actual_class])
109 | unscaled_logits = tf.constant([[1., -3., 10.]])
110 | sparse_target_dist = tf.constant([2])
111 | sparse_xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=unscaled_logits,
112 |                                                                  labels=sparse_target_dist)
113 | print(sess.run(sparse_xentropy)) -------------------------------------------------------------------------------- /02_TensorFlow_Way/05_Implementing_Back_Propagation/05_back_propagation.py: -------------------------------------------------------------------------------- 1 | # Back Propagation
2 |
#----------------------------------
3 | #
4 | # This python script shows how to implement back propagation
5 | # in regression and classification models.
6 |
7 | import numpy as np
8 | import tensorflow as tf
9 | from tensorflow.python.framework import ops
10 | ops.reset_default_graph()
11 |
12 | # Create graph
13 | sess = tf.Session()
14 |
15 | # Regression Example:
16 | # We will create sample data as follows:
17 | # x-data: 100 random samples from a normal ~ N(1, 0.1)
18 | # target: 100 values of the value 10.
19 | # We will fit the model:
20 | # x-data * A = target
21 | # Theoretically, A = 10.
22 |
23 | # Create data
24 | x_vals = np.random.normal(1, 0.1, 100)
25 | y_vals = np.repeat(10., 100)
26 | x_data = tf.placeholder(shape=[1], dtype=tf.float32)
27 | y_target = tf.placeholder(shape=[1], dtype=tf.float32)
28 |
29 | # Create variable (one model parameter = A)
30 | A = tf.Variable(tf.random_normal(shape=[1]))
31 |
32 | # Add operation to graph
33 | my_output = tf.multiply(x_data, A)
34 |
35 | # Add L2 loss operation to graph
36 | loss = tf.square(my_output - y_target)
37 |
38 | # Create Optimizer
39 | my_opt = tf.train.GradientDescentOptimizer(0.02)
40 | train_step = my_opt.minimize(loss)
41 |
42 | # Initialize variables
43 | init = tf.global_variables_initializer()
44 | sess.run(init)
45 |
46 | # Run Loop
47 | for i in range(100):
48 |     rand_index = np.random.choice(100)
49 |     rand_x = [x_vals[rand_index]]
50 |     rand_y = [y_vals[rand_index]]
51 |     sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
52 |     if (i+1)%25==0:
53 |         print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)))
54 |         print('Loss = ' + str(sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})))
55 |
56 | # Classification Example
57 | # We will create sample data as follows:
58 | # x-data: sample 50 random values from a normal = N(-1, 1)
59 | #         + sample 50 random values from a normal = N(3, 1)
60 | # target: 50 values of 0 + 50 values of 1.
61 | # These are essentially 100 values of the corresponding output index
62 | # We will fit the binary classification model:
63 | # If sigmoid(x+A) < 0.5 -> 0 else 1
64 | # Theoretically, A should be -(mean1 + mean2)/2 = -(-1 + 3)/2 = -1
65 |
66 | ops.reset_default_graph()
67 |
68 | # Create graph
69 | sess = tf.Session()
70 |
71 | # Create data
72 | x_vals = np.concatenate((np.random.normal(-1, 1, 50), np.random.normal(3, 1, 50)))
73 | y_vals = np.concatenate((np.repeat(0., 50), np.repeat(1., 50)))
74 | x_data = tf.placeholder(shape=[1], dtype=tf.float32)
75 | y_target = tf.placeholder(shape=[1], dtype=tf.float32)
76 |
77 | # Create variable (one model parameter = A)
78 | A = tf.Variable(tf.random_normal(mean=10, shape=[1]))
79 |
80 | # Add operation to graph
81 | # Want to create the operation sigmoid(x + A)
82 | # Note, the sigmoid() part is in the loss function
83 | my_output = tf.add(x_data, A)
84 |
85 | # Now we have to add another dimension to each (batch size of 1)
86 | my_output_expanded = tf.expand_dims(my_output, 0)
87 | y_target_expanded = tf.expand_dims(y_target, 0)
88 |
89 | # Initialize variables
90 | init = tf.global_variables_initializer()
91 | sess.run(init)
92 |
93 | # Add classification loss (cross entropy)
94 | xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=my_output_expanded, labels=y_target_expanded)
95 |
96 | # Create Optimizer
97 | my_opt = tf.train.GradientDescentOptimizer(0.05)
98 | train_step = my_opt.minimize(xentropy)
99 |
100 | # Run loop
101 | for i in range(1400):
102 |     rand_index = np.random.choice(100)
103 |     rand_x = [x_vals[rand_index]]
104 |     rand_y = [y_vals[rand_index]]
105 |
106 |     sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
107 |     if (i+1)%200==0:
108 |         print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)))
109 |         print('Loss = ' + str(sess.run(xentropy, feed_dict={x_data: rand_x, y_target: rand_y})))
110 |
111 | # Evaluate Predictions
112 | predictions = []
113 | for i in range(len(x_vals)):
114 |     x_val = [x_vals[i]]
115 |     prediction = sess.run(tf.round(tf.sigmoid(my_output)), feed_dict={x_data: x_val})
116 |     predictions.append(prediction[0])
117 |
118 | accuracy = sum(x==y for x,y in zip(predictions, y_vals))/100.
119 | print('Ending Accuracy = ' + str(np.round(accuracy, 2))) -------------------------------------------------------------------------------- /02_TensorFlow_Way/06_Working_with_Batch_and_Stochastic_Training/06_batch_stochastic_training.py: -------------------------------------------------------------------------------- 1 | # Batch and Stochastic Training
2 | #----------------------------------
3 | #
4 | # This python script illustrates two different training methods:
5 | # batch and stochastic training. For each model, we will use
6 | # a regression model that predicts one model variable.
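# In brief: stochastic updates (batch size 1) are noisy, which can help escape
# shallow local minima but produces an erratic loss curve; batch updates average
# the gradient over many examples, giving the smoother curve seen in the final
# plot at the cost of more computation per step.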
7 | 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | import tensorflow as tf 11 | from tensorflow.python.framework import ops 12 | ops.reset_default_graph() 13 | 14 | # We will implement a regression example in stochastic and batch training 15 | 16 | # Stochastic Training: 17 | # Create graph 18 | sess = tf.Session() 19 | 20 | # Create data 21 | x_vals = np.random.normal(1, 0.1, 100) 22 | y_vals = np.repeat(10., 100) 23 | x_data = tf.placeholder(shape=[1], dtype=tf.float32) 24 | y_target = tf.placeholder(shape=[1], dtype=tf.float32) 25 | 26 | # Create variable (one model parameter = A) 27 | A = tf.Variable(tf.random_normal(shape=[1])) 28 | 29 | # Add operation to graph 30 | my_output = tf.multiply(x_data, A) 31 | 32 | # Add L2 loss operation to graph 33 | loss = tf.square(my_output - y_target) 34 | 35 | # Create Optimizer 36 | my_opt = tf.train.GradientDescentOptimizer(0.02) 37 | train_step = my_opt.minimize(loss) 38 | 39 | # Initialize variables 40 | init = tf.global_variables_initializer() 41 | sess.run(init) 42 | 43 | loss_stochastic = [] 44 | # Run Loop 45 | for i in range(100): 46 | rand_index = np.random.choice(100) 47 | rand_x = [x_vals[rand_index]] 48 | rand_y = [y_vals[rand_index]] 49 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 50 | if (i+1)%5==0: 51 | print('Step #' + str(i+1) + ' A = ' + str(sess.run(A))) 52 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 53 | print('Loss = ' + str(temp_loss)) 54 | loss_stochastic.append(temp_loss) 55 | 56 | 57 | # Batch Training: 58 | # Re-initialize graph 59 | ops.reset_default_graph() 60 | sess = tf.Session() 61 | 62 | # Declare batch size 63 | batch_size = 20 64 | 65 | # Create data 66 | x_vals = np.random.normal(1, 0.1, 100) 67 | y_vals = np.repeat(10., 100) 68 | x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 69 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 70 | 71 | # Create variable (one model parameter = A) 72 | A = tf.Variable(tf.random_normal(shape=[1,1])) 73 | 74 | # Add operation to graph 75 | my_output = tf.matmul(x_data, A) 76 | 77 | # Add L2 loss operation to graph 78 | loss = tf.reduce_mean(tf.square(my_output - y_target)) 79 | 80 | # Create Optimizer 81 | my_opt = tf.train.GradientDescentOptimizer(0.02) 82 | train_step = my_opt.minimize(loss) 83 | 84 | # Initialize variables 85 | init = tf.global_variables_initializer() 86 | sess.run(init) 87 | 88 | loss_batch = [] 89 | # Run Loop 90 | for i in range(100): 91 | rand_index = np.random.choice(100, size=batch_size) 92 | rand_x = np.transpose([x_vals[rand_index]]) 93 | rand_y = np.transpose([y_vals[rand_index]]) 94 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 95 | if (i+1)%5==0: 96 | print('Step #' + str(i+1) + ' A = ' + str(sess.run(A))) 97 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 98 | print('Loss = ' + str(temp_loss)) 99 | loss_batch.append(temp_loss) 100 | 101 | plt.plot(range(0, 100, 5), loss_stochastic, 'b-', label='Stochastic Loss') 102 | plt.plot(range(0, 100, 5), loss_batch, 'r--', label='Batch Loss, size=20') 103 | plt.legend(loc='upper right', prop={'size': 11}) 104 | plt.show() -------------------------------------------------------------------------------- /02_TensorFlow_Way/07_Combining_Everything_Together/07_combining_everything_together.py: -------------------------------------------------------------------------------- 1 | # Combining Everything Together 2 | #---------------------------------- 3 | # This file will 
perform binary classification on the 4 | # iris dataset. We will only predict if a flower is 5 | # I.setosa or not. 6 | # 7 | # We will create a simple binary classifier by creating a line 8 | # and running everything through a sigmoid to get a binary predictor. 9 | # The two features we will use are petal length and petal width. 10 | # 11 | # We will use batch training, but this can be easily 12 | # adapted to stochastic training. 13 | 14 | import matplotlib.pyplot as plt 15 | import numpy as np 16 | from sklearn import datasets 17 | import tensorflow as tf 18 | from tensorflow.python.framework import ops 19 | ops.reset_default_graph() 20 | 21 | # Load the iris data 22 | # iris.target = {0, 1, 2}, where '0' is setosa 23 | # iris.data ~ [sepal.length, sepal.width, petal.length, petal.width] 24 | iris = datasets.load_iris() 25 | binary_target = np.array([1. if x==0 else 0. for x in iris.target]) 26 | iris_2d = np.array([[x[2], x[3]] for x in iris.data]) 27 | 28 | # Declare batch size 29 | batch_size = 20 30 | 31 | # Create graph 32 | sess = tf.Session() 33 | 34 | # Declare placeholders 35 | x1_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 36 | x2_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 37 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 38 | 39 | # Create variables A and b (decision boundary: 0 = x1 - (A*x2 + b)) 40 | A = tf.Variable(tf.random_normal(shape=[1, 1])) 41 | b = tf.Variable(tf.random_normal(shape=[1, 1])) 42 | 43 | # Add model to graph: 44 | # x1 - (A*x2 + b) 45 | my_mult = tf.matmul(x2_data, A) 46 | my_add = tf.add(my_mult, b) 47 | my_output = tf.subtract(x1_data, my_add) 48 | 49 | # Add classification loss (cross entropy) 50 | xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=my_output, labels=y_target) 51 | 52 | # Create Optimizer 53 | my_opt = tf.train.GradientDescentOptimizer(0.05) 54 | train_step = my_opt.minimize(xentropy) 55 | 56 | # Initialize variables 57 | init = tf.global_variables_initializer() 58 | sess.run(init) 59 | 60 | # Run Loop 61 | for i in range(1000): 62 | rand_index = np.random.choice(len(iris_2d), size=batch_size) 63 | #rand_x = np.transpose([iris_2d[rand_index]]) 64 | rand_x = iris_2d[rand_index] 65 | rand_x1 = np.array([[x[0]] for x in rand_x]) 66 | rand_x2 = np.array([[x[1]] for x in rand_x]) 67 | #rand_y = np.transpose([binary_target[rand_index]]) 68 | rand_y = np.array([[y] for y in binary_target[rand_index]]) 69 | sess.run(train_step, feed_dict={x1_data: rand_x1, x2_data: rand_x2, y_target: rand_y}) 70 | if (i+1)%200==0: 71 | print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ', b = ' + str(sess.run(b))) 72 | 73 | 74 | # Visualize Results 75 | # Pull out slope/intercept 76 | [[slope]] = sess.run(A) 77 | [[intercept]] = sess.run(b) 78 | 79 | # Create fitted line 80 | x = np.linspace(0, 3, num=50) 81 | ablineValues = [] 82 | for i in x: 83 | ablineValues.append(slope*i+intercept) 84 | 85 | # Plot the fitted line over the data 86 | setosa_x = [a[1] for i,a in enumerate(iris_2d) if binary_target[i]==1] 87 | setosa_y = [a[0] for i,a in enumerate(iris_2d) if binary_target[i]==1] 88 | non_setosa_x = [a[1] for i,a in enumerate(iris_2d) if binary_target[i]==0] 89 | non_setosa_y = [a[0] for i,a in enumerate(iris_2d) if binary_target[i]==0] 90 | plt.plot(setosa_x, setosa_y, 'rx', ms=10, mew=2, label='setosa') 91 | plt.plot(non_setosa_x, non_setosa_y, 'ro', label='Non-setosa') 92 | plt.plot(x, ablineValues, 'b-') 93 | plt.xlim([0.0, 2.7]) 94 | plt.ylim([0.0, 7.1]) 95 | plt.suptitle('Linear Separator For I.setosa', fontsize=20)
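# (Editor's note: iris_2d columns are [petal length, petal width]; the plot puts
#  a[1] = petal width on the x-axis and a[0] = petal length on the y-axis, which
#  is why the axis labels below are oriented this way.)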
96 | plt.xlabel('Petal Width') 97 | plt.ylabel('Petal Length') 98 | plt.legend(loc='lower right') 99 | plt.show() -------------------------------------------------------------------------------- /02_TensorFlow_Way/images/01_Operations_on_a_Graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/02_TensorFlow_Way/images/01_Operations_on_a_Graph.png -------------------------------------------------------------------------------- /02_TensorFlow_Way/images/02_Multiple_Operations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/02_TensorFlow_Way/images/02_Multiple_Operations.png -------------------------------------------------------------------------------- /02_TensorFlow_Way/images/03_Multiple_Layers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/02_TensorFlow_Way/images/03_Multiple_Layers.png -------------------------------------------------------------------------------- /02_TensorFlow_Way/images/04_loss_fun1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/02_TensorFlow_Way/images/04_loss_fun1.png -------------------------------------------------------------------------------- /02_TensorFlow_Way/images/04_loss_fun2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/02_TensorFlow_Way/images/04_loss_fun2.png -------------------------------------------------------------------------------- /02_TensorFlow_Way/images/06_Back_Propagation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/02_TensorFlow_Way/images/06_Back_Propagation.png -------------------------------------------------------------------------------- /02_TensorFlow_Way/images/07_Combing_Everything_Together.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/02_TensorFlow_Way/images/07_Combing_Everything_Together.png -------------------------------------------------------------------------------- /02_TensorFlow_Way/images/08_Evaluating_Models.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/02_TensorFlow_Way/images/08_Evaluating_Models.png -------------------------------------------------------------------------------- /03_Linear_Regression/01_Using_the_Matrix_Inverse_Method/01_lin_reg_inverse.py: -------------------------------------------------------------------------------- 1 | # Linear Regression: Inverse Matrix Method 2 |
#---------------------------------- 3 | # 4 | # This function shows how to use TensorFlow to 5 | # solve linear regression via the matrix inverse. 6 | # 7 | # Given Ax=b, solving for x: 8 | # x = (t(A) * A)^(-1) * t(A) * b 9 | # where t(A) is the transpose of A 10 | 11 | import matplotlib.pyplot as plt 12 | import numpy as np 13 | import tensorflow as tf 14 | from tensorflow.python.framework import ops 15 | ops.reset_default_graph() 16 | 17 | # Create graph 18 | sess = tf.Session() 19 | 20 | # Create the data 21 | x_vals = np.linspace(0, 10, 100) 22 | y_vals = x_vals + np.random.normal(0, 1, 100) 23 | 24 | # Create design matrix 25 | x_vals_column = np.transpose(np.matrix(x_vals)) 26 | ones_column = np.transpose(np.matrix(np.repeat(1, 100))) 27 | A = np.column_stack((x_vals_column, ones_column)) 28 | 29 | # Create b matrix 30 | b = np.transpose(np.matrix(y_vals)) 31 | 32 | # Create tensors 33 | A_tensor = tf.constant(A) 34 | b_tensor = tf.constant(b) 35 | 36 | # Matrix inverse solution 37 | tA_A = tf.matmul(tf.transpose(A_tensor), A_tensor) 38 | tA_A_inv = tf.matrix_inverse(tA_A) 39 | product = tf.matmul(tA_A_inv, tf.transpose(A_tensor)) 40 | solution = tf.matmul(product, b_tensor) 41 | 42 | solution_eval = sess.run(solution) 43 | 44 | # Extract coefficients 45 | slope = solution_eval[0][0] 46 | y_intercept = solution_eval[1][0] 47 | 48 | print('slope: ' + str(slope)) 49 | print('y_intercept: ' + str(y_intercept)) 50 | 51 | # Get best fit line 52 | best_fit = [] 53 | for i in x_vals: 54 | best_fit.append(slope*i+y_intercept) 55 | 56 | # Plot the results 57 | plt.plot(x_vals, y_vals, 'o', label='Data') 58 | plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3) 59 | plt.legend(loc='upper left') 60 | plt.show() -------------------------------------------------------------------------------- /03_Linear_Regression/02_Implementing_a_Decomposition_Method/02_lin_reg_decomposition.py: -------------------------------------------------------------------------------- 1 | # Linear Regression: Decomposition Method 2 | #---------------------------------- 3 | # 4 | # This function shows how to use TensorFlow to 5 | # solve linear regression via a Cholesky decomposition.
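# (Editor's note: both this recipe and the previous one solve the same normal
#  equations t(A) * A * x = t(A) * b; the Cholesky route below avoids forming
#  the matrix inverse explicitly, which is generally more numerically stable.)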
6 | # 7 | # Given Ax=b, and a Cholesky decomposition of the normal matrix such that 8 | # t(A)*A = L*L', we can solve for x via 9 | # 1) L*y=t(A)*b 10 | # 2) L'*x=y 11 | 12 | import matplotlib.pyplot as plt 13 | import numpy as np 14 | import tensorflow as tf 15 | from tensorflow.python.framework import ops 16 | ops.reset_default_graph() 17 | 18 | # Create graph 19 | sess = tf.Session() 20 | 21 | # Create the data 22 | x_vals = np.linspace(0, 10, 100) 23 | y_vals = x_vals + np.random.normal(0, 1, 100) 24 | 25 | # Create design matrix 26 | x_vals_column = np.transpose(np.matrix(x_vals)) 27 | ones_column = np.transpose(np.matrix(np.repeat(1, 100))) 28 | A = np.column_stack((x_vals_column, ones_column)) 29 | 30 | # Create b matrix 31 | b = np.transpose(np.matrix(y_vals)) 32 | 33 | # Create tensors 34 | A_tensor = tf.constant(A) 35 | b_tensor = tf.constant(b) 36 | 37 | # Find Cholesky Decomposition 38 | tA_A = tf.matmul(tf.transpose(A_tensor), A_tensor) 39 | L = tf.cholesky(tA_A) 40 | 41 | # Solve L*y=t(A)*b 42 | tA_b = tf.matmul(tf.transpose(A_tensor), b) 43 | sol1 = tf.matrix_solve(L, tA_b) 44 | 45 | # Solve L' * x = sol1 46 | sol2 = tf.matrix_solve(tf.transpose(L), sol1) 47 | 48 | solution_eval = sess.run(sol2) 49 | 50 | # Extract coefficients 51 | slope = solution_eval[0][0] 52 | y_intercept = solution_eval[1][0] 53 | 54 | print('slope: ' + str(slope)) 55 | print('y_intercept: ' + str(y_intercept)) 56 | 57 | # Get best fit line 58 | best_fit = [] 59 | for i in x_vals: 60 | best_fit.append(slope*i+y_intercept) 61 | 62 | # Plot the results 63 | plt.plot(x_vals, y_vals, 'o', label='Data') 64 | plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3) 65 | plt.legend(loc='upper left') 66 | plt.show() -------------------------------------------------------------------------------- /03_Linear_Regression/03_TensorFlow_Way_of_Linear_Regression/03_lin_reg_tensorflow_way.py: -------------------------------------------------------------------------------- 1 | # Linear Regression: TensorFlow Way 2 | #---------------------------------- 3 | # 4 | # This function shows how to use TensorFlow to 5 | # solve linear regression.
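# (here via stochastic gradient descent on an L2 loss, in contrast to the
#  closed-form recipes above)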
6 | # y = Ax + b 7 | # 8 | # We will use the iris data, specifically: 9 | # y = Sepal Length 10 | # x = Petal Width 11 | 12 | import matplotlib.pyplot as plt 13 | import numpy as np 14 | import tensorflow as tf 15 | from sklearn import datasets 16 | from tensorflow.python.framework import ops 17 | ops.reset_default_graph() 18 | 19 | # Create graph 20 | sess = tf.Session() 21 | 22 | # Load the data 23 | # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)] 24 | iris = datasets.load_iris() 25 | x_vals = np.array([x[3] for x in iris.data]) 26 | y_vals = np.array([y[0] for y in iris.data]) 27 | 28 | # Declare batch size 29 | batch_size = 25 30 | 31 | # Initialize placeholders 32 | x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 33 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 34 | 35 | # Create variables for linear regression 36 | A = tf.Variable(tf.random_normal(shape=[1,1])) 37 | b = tf.Variable(tf.random_normal(shape=[1,1])) 38 | 39 | # Declare model operations 40 | model_output = tf.add(tf.matmul(x_data, A), b) 41 | 42 | # Declare loss function (L2 loss) 43 | loss = tf.reduce_mean(tf.square(y_target - model_output)) 44 | 45 | # Declare optimizer 46 | my_opt = tf.train.GradientDescentOptimizer(0.05) 47 | train_step = my_opt.minimize(loss) 48 | 49 | # Initialize variables 50 | init = tf.global_variables_initializer() 51 | sess.run(init) 52 | 53 | # Training loop 54 | loss_vec = [] 55 | for i in range(100): 56 | rand_index = np.random.choice(len(x_vals), size=batch_size) 57 | rand_x = np.transpose([x_vals[rand_index]]) 58 | rand_y = np.transpose([y_vals[rand_index]]) 59 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 60 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 61 | loss_vec.append(temp_loss) 62 | if (i+1)%25==0: 63 | print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b))) 64 | print('Loss = ' + str(temp_loss)) 65 | 66 | # Get the optimal coefficients 67 | [slope] = sess.run(A) 68 | [y_intercept] = sess.run(b) 69 | 70 | # Get best fit line 71 | best_fit = [] 72 | for i in x_vals: 73 | best_fit.append(slope*i+y_intercept) 74 | 75 | # Plot the result 76 | plt.plot(x_vals, y_vals, 'o', label='Data Points') 77 | plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3) 78 | plt.legend(loc='upper left') 79 | plt.title('Sepal Length vs Petal Width') 80 | plt.xlabel('Petal Width') 81 | plt.ylabel('Sepal Length') 82 | plt.show() 83 | 84 | # Plot loss over time 85 | plt.plot(loss_vec, 'k-') 86 | plt.title('L2 Loss per Generation') 87 | plt.xlabel('Generation') 88 | plt.ylabel('L2 Loss') 89 | plt.show() 90 | -------------------------------------------------------------------------------- /03_Linear_Regression/04_Loss_Functions_in_Linear_Regressions/04_lin_reg_l1_vs_l2.py: -------------------------------------------------------------------------------- 1 | # Linear Regression: L1 vs L2 2 | #---------------------------------- 3 | # 4 | # This function shows how to use TensorFlow to 5 | # contrast L1 and L2 loss functions for linear regression.
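# --- Editor's sketch (illustrative, not part of the original recipe): the key
# practical difference is the gradient. For y_hat = A*x,
#     L2: dL/dA = 2*x*(A*x - y)    -> step size scales with the residual
#     L1: dL/dA = x*sign(A*x - y)  -> constant-magnitude steps
# so L1 marches steadily toward the optimum but oscillates once it gets there:
A_l1, lr = 0., 0.4
for _ in range(30):
    A_l1 -= lr * 1. * (1. if A_l1 * 1. > 10. else -1.)   # x = 1, y = 10
# A_l1 ends up bouncing around 10 in steps of size lr instead of settling.
# --- end of sketch ---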
6 | 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | import tensorflow as tf 10 | from sklearn import datasets 11 | from tensorflow.python.framework import ops 12 | ops.reset_default_graph() 13 | 14 | # Create graph 15 | sess = tf.Session() 16 | 17 | # Load the data 18 | # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)] 19 | iris = datasets.load_iris() 20 | x_vals = np.array([x[3] for x in iris.data]) 21 | y_vals = np.array([y[0] for y in iris.data]) 22 | 23 | # Declare batch size and number of iterations 24 | batch_size = 25 25 | learning_rate = 0.4 # deliberately large -- used to contrast L1 and L2 convergence 26 | iterations = 50 27 | 28 | # Initialize placeholders 29 | x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 30 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 31 | 32 | # Create variables for linear regression 33 | A = tf.Variable(tf.random_normal(shape=[1,1])) 34 | b = tf.Variable(tf.random_normal(shape=[1,1])) 35 | 36 | # Declare model operations 37 | model_output = tf.add(tf.matmul(x_data, A), b) 38 | 39 | # Declare loss functions 40 | loss_l1 = tf.reduce_mean(tf.abs(y_target - model_output)) 41 | 42 | # Declare optimizers 43 | my_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate) 44 | train_step_l1 = my_opt_l1.minimize(loss_l1) 45 | 46 | # Initialize variables 47 | init = tf.global_variables_initializer() 48 | sess.run(init) 49 | 50 | # Training loop 51 | loss_vec_l1 = [] 52 | for i in range(iterations): 53 | rand_index = np.random.choice(len(x_vals), size=batch_size) 54 | rand_x = np.transpose([x_vals[rand_index]]) 55 | rand_y = np.transpose([y_vals[rand_index]]) 56 | sess.run(train_step_l1, feed_dict={x_data: rand_x, y_target: rand_y}) 57 | temp_loss_l1 = sess.run(loss_l1, feed_dict={x_data: rand_x, y_target: rand_y}) 58 | loss_vec_l1.append(temp_loss_l1) 59 | if (i+1)%25==0: 60 | print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b))) 61 | 62 | 63 | # L2 Loss 64 | # Reinitialize graph 65 | ops.reset_default_graph() 66 | 67 | # Create graph 68 | sess = tf.Session() 69 | 70 | # Initialize placeholders 71 | x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 72 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 73 | 74 | # Create variables for linear regression 75 | A = tf.Variable(tf.random_normal(shape=[1,1])) 76 | b = tf.Variable(tf.random_normal(shape=[1,1])) 77 | 78 | # Declare model operations 79 | model_output = tf.add(tf.matmul(x_data, A), b) 80 | 81 | # Declare loss functions 82 | loss_l2 = tf.reduce_mean(tf.square(y_target - model_output)) 83 | 84 | # Declare optimizers 85 | my_opt_l2 = tf.train.GradientDescentOptimizer(learning_rate) 86 | train_step_l2 = my_opt_l2.minimize(loss_l2) 87 | 88 | # Initialize variables 89 | init = tf.global_variables_initializer() 90 | sess.run(init) 91 | 92 | loss_vec_l2 = [] 93 | for i in range(iterations): 94 | rand_index = np.random.choice(len(x_vals), size=batch_size) 95 | rand_x = np.transpose([x_vals[rand_index]]) 96 | rand_y = np.transpose([y_vals[rand_index]]) 97 | sess.run(train_step_l2, feed_dict={x_data: rand_x, y_target: rand_y}) 98 | temp_loss_l2 = sess.run(loss_l2, feed_dict={x_data: rand_x, y_target: rand_y}) 99 | loss_vec_l2.append(temp_loss_l2) 100 | if (i+1)%25==0: 101 | print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b))) 102 | 103 | 104 | # Plot loss over time 105 | plt.plot(loss_vec_l1, 'k-', label='L1 Loss') 106 | plt.plot(loss_vec_l2, 'r--', label='L2 Loss') 107 | plt.title('L1 and L2
Loss per Generation') 108 | plt.xlabel('Generation') 109 | plt.ylabel('Loss') 110 | plt.legend(loc='upper right') 111 | plt.show() 112 | -------------------------------------------------------------------------------- /03_Linear_Regression/05_Implementing_Deming_Regression/05_deming_regression.py: -------------------------------------------------------------------------------- 1 | # Deming Regression 2 | #---------------------------------- 3 | # 4 | # This function shows how to use TensorFlow to 5 | # solve linear Deming regression. 6 | # y = Ax + b 7 | # 8 | # We will use the iris data, specifically: 9 | # y = Sepal Length 10 | # x = Petal Width 11 | 12 | import matplotlib.pyplot as plt 13 | import numpy as np 14 | import tensorflow as tf 15 | from sklearn import datasets 16 | from tensorflow.python.framework import ops 17 | ops.reset_default_graph() 18 | 19 | # Set a random seed 20 | tf.set_random_seed(42) 21 | np.random.seed(42) 22 | 23 | # Create graph 24 | sess = tf.Session() 25 | 26 | # Load the data 27 | # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)] 28 | iris = datasets.load_iris() 29 | x_vals = np.array([x[3] for x in iris.data]) 30 | y_vals = np.array([y[0] for y in iris.data]) 31 | 32 | # Declare batch size 33 | batch_size = 50 34 | 35 | # Initialize placeholders 36 | x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 37 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 38 | 39 | # Create variables for linear regression 40 | A = tf.Variable(tf.random_normal(shape=[1,1])) 41 | b = tf.Variable(tf.random_normal(shape=[1,1])) 42 | 43 | # Declare model operations 44 | model_output = tf.add(tf.matmul(x_data, A), b) 45 | 46 | # Declare Deming loss function 47 | deming_numerator = tf.abs(tf.subtract(y_target, tf.add(tf.matmul(x_data, A), b))) 48 | deming_denominator = tf.sqrt(tf.add(tf.square(A),1)) 49 | loss = tf.reduce_mean(tf.truediv(deming_numerator, deming_denominator)) 50 | 51 | # Declare optimizer 52 | my_opt = tf.train.GradientDescentOptimizer(0.15) 53 | train_step = my_opt.minimize(loss) 54 | 55 | # Initialize variables 56 | init = tf.global_variables_initializer() 57 | sess.run(init) 58 | 59 | # Training loop 60 | loss_vec = [] 61 | for i in range(1000): 62 | rand_index = np.random.choice(len(x_vals), size=batch_size) 63 | rand_x = np.transpose([x_vals[rand_index]]) 64 | rand_y = np.transpose([y_vals[rand_index]]) 65 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 66 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 67 | loss_vec.append(temp_loss) 68 | if (i+1)%50 == 0: 69 | print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b))) 70 | print('Loss = ' + str(temp_loss)) 71 | 72 | # Get the optimal coefficients 73 | [slope] = sess.run(A) 74 | [y_intercept] = sess.run(b) 75 | 76 | # Get best fit line 77 | best_fit = [] 78 | for i in x_vals: 79 | best_fit.append(slope*i+y_intercept) 80 | 81 | # Plot the result 82 | plt.plot(x_vals, y_vals, 'o', label='Data Points') 83 | plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3) 84 | plt.legend(loc='upper left') 85 | plt.title('Sepal Length vs Petal Width') 86 | plt.xlabel('Petal Width') 87 | plt.ylabel('Sepal Length') 88 | plt.show() 89 | 90 | # Plot loss over time 91 | plt.plot(loss_vec, 'k-') 92 | plt.title('Deming Loss per Generation') 93 | plt.xlabel('Generation') 94 | plt.ylabel('Deming Loss') 95 | plt.show() 96 | --------------------------------------------------------------------------------
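# --- Editor's check (hypothetical numbers, not part of the original recipe):
# the Deming loss above is the perpendicular distance from a point to the line
# y = A*x + b, i.e. |y0 - (A*x0 + b)| / sqrt(A^2 + 1):
import numpy as np
A_, b_ = 2.0, 1.0                                   # assumed line y = 2x + 1
x0, y0 = 3.0, 4.0                                   # assumed data point
deming_dist = abs(y0 - (A_ * x0 + b_)) / np.sqrt(A_ ** 2 + 1)
point_line_dist = abs(A_ * x0 - y0 + b_) / np.sqrt(A_ ** 2 + 1)  # textbook formula
assert np.isclose(deming_dist, point_line_dist)     # both equal 3 / sqrt(5)
# --- end of check ---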
/03_Linear_Regression/06_Implementing_Lasso_and_Ridge_Regression/06_lasso_and_ridge_regression.py: -------------------------------------------------------------------------------- 1 | # LASSO and Ridge Regression 2 | # 3 | # This function shows how to use TensorFlow to solve LASSO or 4 | # Ridge regression for 5 | # y = Ax + b 6 | # 7 | # We will use the iris data, specifically: 8 | # y = Sepal Length 9 | # x = Petal Width 10 | 11 | # import required libraries 12 | import matplotlib.pyplot as plt 13 | import sys 14 | import numpy as np 15 | import tensorflow as tf 16 | from sklearn import datasets 17 | from tensorflow.python.framework import ops 18 | 19 | 20 | # Specify 'Ridge' or 'LASSO' 21 | #regression_type = 'LASSO' 22 | regression_type = 'Ridge' 23 | 24 | # clear out old graph 25 | ops.reset_default_graph() 26 | 27 | # Create graph 28 | sess = tf.Session() 29 | 30 | ### 31 | # Load iris data 32 | ### 33 | 34 | # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)] 35 | iris = datasets.load_iris() 36 | x_vals = np.array([x[3] for x in iris.data]) 37 | y_vals = np.array([y[0] for y in iris.data]) 38 | 39 | ### 40 | # Model Parameters 41 | ### 42 | 43 | # Declare batch size 44 | batch_size = 50 45 | 46 | # Initialize placeholders 47 | x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 48 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 49 | 50 | # make results reproducible 51 | seed = 13 52 | np.random.seed(seed) 53 | tf.set_random_seed(seed) 54 | 55 | # Create variables for linear regression 56 | A = tf.Variable(tf.random_normal(shape=[1,1])) 57 | b = tf.Variable(tf.random_normal(shape=[1,1])) 58 | 59 | # Declare model operations 60 | model_output = tf.add(tf.matmul(x_data, A), b) 61 | 62 | ### 63 | # Loss Functions 64 | ### 65 | 66 | # Select appropriate loss function based on regression type 67 | 68 | if regression_type == 'LASSO': 69 | # Declare Lasso loss function 70 | # Lasso Loss = L2_Loss + heaviside_step penalty, 71 | # where the smooth Heaviside step is ~0 if A < lasso_param, otherwise ~1 (scaled by 99 below) 72 | lasso_param = tf.constant(0.9) 73 | heaviside_step = tf.truediv(1., tf.add(1., tf.exp(tf.multiply(-50., tf.subtract(A, lasso_param))))) 74 | regularization_param = tf.multiply(heaviside_step, 99.) 75 | loss = tf.add(tf.reduce_mean(tf.square(y_target - model_output)), regularization_param) 76 | 77 | elif regression_type == 'Ridge': 78 | # Declare the Ridge loss function 79 | # Ridge loss = L2_loss + ridge_param * (squared L2 norm of slope) 80 | ridge_param = tf.constant(1.)
81 | ridge_loss = tf.reduce_mean(tf.square(A)) 82 | loss = tf.expand_dims(tf.add(tf.reduce_mean(tf.square(y_target - model_output)), tf.multiply(ridge_param, ridge_loss)), 0) 83 | 84 | else: 85 | print('Invalid regression_type parameter value',file=sys.stderr) 86 | 87 | 88 | ### 89 | # Optimizer 90 | ### 91 | 92 | # Declare optimizer 93 | my_opt = tf.train.GradientDescentOptimizer(0.001) 94 | train_step = my_opt.minimize(loss) 95 | 96 | ### 97 | # Run regression 98 | ### 99 | 100 | # Initialize variables 101 | init = tf.global_variables_initializer() 102 | sess.run(init) 103 | 104 | # Training loop 105 | loss_vec = [] 106 | for i in range(1500): 107 | rand_index = np.random.choice(len(x_vals), size=batch_size) 108 | rand_x = np.transpose([x_vals[rand_index]]) 109 | rand_y = np.transpose([y_vals[rand_index]]) 110 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 111 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 112 | loss_vec.append(temp_loss[0]) 113 | if (i+1)%300==0: 114 | print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b))) 115 | print('Loss = ' + str(temp_loss)) 116 | print('\n') 117 | 118 | ### 119 | # Extract regression results 120 | ### 121 | 122 | # Get the optimal coefficients 123 | [slope] = sess.run(A) 124 | [y_intercept] = sess.run(b) 125 | 126 | # Get best fit line 127 | best_fit = [] 128 | for i in x_vals: 129 | best_fit.append(slope*i+y_intercept) 130 | 131 | 132 | ### 133 | # Plot results 134 | ### 135 | 136 | # Plot regression line against data points 137 | plt.plot(x_vals, y_vals, 'o', label='Data Points') 138 | plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3) 139 | plt.legend(loc='upper left') 140 | plt.title('Sepal Length vs Pedal Width') 141 | plt.xlabel('Pedal Width') 142 | plt.ylabel('Sepal Length') 143 | plt.show() 144 | 145 | # Plot loss over time 146 | plt.plot(loss_vec, 'k-') 147 | plt.title(regression_type + ' Loss per Generation') 148 | plt.xlabel('Generation') 149 | plt.ylabel('Loss') 150 | plt.show() 151 | 152 | -------------------------------------------------------------------------------- /03_Linear_Regression/07_Implementing_Elasticnet_Regression/07_elasticnet_regression.py: -------------------------------------------------------------------------------- 1 | # Elastic Net Regression 2 | #---------------------------------- 3 | # 4 | # This function shows how to use TensorFlow to 5 | # solve elastic net regression. 
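# (the loss below is the L2 data loss plus elastic_param1 * mean|A| and
#  elastic_param2 * mean(A^2), i.e. a blend of the LASSO and Ridge penalties
#  from the previous recipe)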
6 | # y = Ax + b 7 | # 8 | # We will use the iris data, specifically: 9 | # y = Sepal Length 10 | # x = Sepal Width, Petal Length, Petal Width 11 | 12 | import matplotlib.pyplot as plt 13 | import numpy as np 14 | import tensorflow as tf 15 | from sklearn import datasets 16 | from tensorflow.python.framework import ops 17 | 18 | ### 19 | # Set up for TensorFlow 20 | ### 21 | 22 | ops.reset_default_graph() 23 | 24 | # Create graph 25 | sess = tf.Session() 26 | 27 | ### 28 | # Obtain data 29 | ### 30 | 31 | # Load the data 32 | # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)] 33 | iris = datasets.load_iris() 34 | x_vals = np.array([[x[1], x[2], x[3]] for x in iris.data]) 35 | y_vals = np.array([y[0] for y in iris.data]) 36 | 37 | ### 38 | # Set up model 39 | ### 40 | 41 | # make results reproducible 42 | seed = 13 43 | np.random.seed(seed) 44 | tf.set_random_seed(seed) 45 | 46 | # Declare batch size 47 | batch_size = 50 48 | 49 | # Initialize placeholders 50 | x_data = tf.placeholder(shape=[None, 3], dtype=tf.float32) 51 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 52 | 53 | # Create variables for linear regression 54 | A = tf.Variable(tf.random_normal(shape=[3,1])) 55 | b = tf.Variable(tf.random_normal(shape=[1,1])) 56 | 57 | # Declare model operations 58 | model_output = tf.add(tf.matmul(x_data, A), b) 59 | 60 | # Declare the elastic net loss function 61 | elastic_param1 = tf.constant(1.) 62 | elastic_param2 = tf.constant(1.) 63 | l1_a_loss = tf.reduce_mean(tf.abs(A)) 64 | l2_a_loss = tf.reduce_mean(tf.square(A)) 65 | e1_term = tf.multiply(elastic_param1, l1_a_loss) 66 | e2_term = tf.multiply(elastic_param2, l2_a_loss) 67 | loss = tf.expand_dims(tf.add(tf.add(tf.reduce_mean(tf.square(y_target - model_output)), e1_term), e2_term), 0) 68 | 69 | # Declare optimizer 70 | my_opt = tf.train.GradientDescentOptimizer(0.001) 71 | train_step = my_opt.minimize(loss) 72 | 73 | ### 74 | # Train model 75 | ### 76 | 77 | # Initialize variables 78 | init = tf.global_variables_initializer() 79 | sess.run(init) 80 | 81 | # Training loop 82 | loss_vec = [] 83 | for i in range(1000): 84 | rand_index = np.random.choice(len(x_vals), size=batch_size) 85 | rand_x = x_vals[rand_index] 86 | rand_y = np.transpose([y_vals[rand_index]]) 87 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 88 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 89 | loss_vec.append(temp_loss[0]) 90 | if (i+1)%250==0: 91 | print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b))) 92 | print('Loss = ' + str(temp_loss)) 93 | 94 | ### 95 | # Extract model results 96 | ### 97 | 98 | # Get the optimal coefficients 99 | [[sw_coef], [pl_coef], [pw_coef]] = sess.run(A) 100 | [y_intercept] = sess.run(b) 101 | 102 | ### 103 | # Plot results 104 | ### 105 | 106 | # Plot loss over time 107 | plt.plot(loss_vec, 'k-') 108 | plt.title('Loss per Generation') 109 | plt.xlabel('Generation') 110 | plt.ylabel('Loss') 111 | plt.show() 112 | 113 | -------------------------------------------------------------------------------- /03_Linear_Regression/08_Implementing_Logistic_Regression/08_logistic_regression.py: -------------------------------------------------------------------------------- 1 | # Logistic Regression 2 | #---------------------------------- 3 | # 4 | # This function shows how to use TensorFlow to 5 | # solve logistic regression.
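# (model_output below is the raw logit Ax + b; the sigmoid is applied inside
#  tf.nn.sigmoid_cross_entropy_with_logits during training and via tf.sigmoid
#  at prediction time)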
6 | # y = sigmoid(Ax + b) 7 | # 8 | # We will use the low birth weight data, specifically: 9 | # y = 0 or 1 = low birth weight 10 | # x = demographic and medical history data 11 | 12 | import matplotlib.pyplot as plt 13 | import numpy as np 14 | import tensorflow as tf 15 | import requests 16 | from tensorflow.python.framework import ops 17 | import os.path 18 | import csv 19 | 20 | 21 | ops.reset_default_graph() 22 | 23 | # Create graph 24 | sess = tf.Session() 25 | 26 | ### 27 | # Obtain and prepare data for modeling 28 | ### 29 | 30 | # Set name of data file 31 | birth_weight_file = 'birth_weight.csv' 32 | 33 | # Download data and create data file if file does not exist in current directory 34 | if not os.path.exists(birth_weight_file): 35 | birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat' 36 | birth_file = requests.get(birthdata_url) 37 | birth_data = birth_file.text.split('\r\n') 38 | birth_header = birth_data[0].split('\t') 39 | birth_data = [[float(x) for x in y.split('\t') if len(x)>=1] for y in birth_data[1:] if len(y)>=1] 40 | with open(birth_weight_file, 'w', newline='') as f: 41 | writer = csv.writer(f) 42 | writer.writerow(birth_header) 43 | writer.writerows(birth_data) 44 | f.close() 45 | 46 | # Read birth weight data into memory 47 | birth_data = [] 48 | with open(birth_weight_file, newline='') as csvfile: 49 | csv_reader = csv.reader(csvfile) 50 | birth_header = next(csv_reader) 51 | for row in csv_reader: 52 | birth_data.append(row) 53 | 54 | birth_data = [[float(x) for x in row] for row in birth_data] 55 | 56 | # Pull out target variable 57 | y_vals = np.array([x[0] for x in birth_data]) 58 | # Pull out predictor variables (not id, not target, and not birthweight) 59 | x_vals = np.array([x[1:8] for x in birth_data]) 60 | 61 | # Set for reproducible results 62 | seed = 99 63 | np.random.seed(seed) 64 | tf.set_random_seed(seed) 65 | 66 | # Split data into train/test = 80%/20% 67 | train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False) 68 | test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices))) 69 | x_vals_train = x_vals[train_indices] 70 | x_vals_test = x_vals[test_indices] 71 | y_vals_train = y_vals[train_indices] 72 | y_vals_test = y_vals[test_indices] 73 | 74 | # Normalize by column (min-max norm) 75 | def normalize_cols(m): 76 | col_max = m.max(axis=0) 77 | col_min = m.min(axis=0) 78 | return (m-col_min) / (col_max - col_min) 79 | 80 | x_vals_train = np.nan_to_num(normalize_cols(x_vals_train)) 81 | x_vals_test = np.nan_to_num(normalize_cols(x_vals_test)) 82 | 83 | ### 84 | # Define TensorFlow computational graph 85 | ### 86 | 87 | # Declare batch size 88 | batch_size = 25 89 | 90 | # Initialize placeholders 91 | x_data = tf.placeholder(shape=[None, 7], dtype=tf.float32) 92 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 93 | 94 | # Create variables for logistic regression 95 | A = tf.Variable(tf.random_normal(shape=[7,1])) 96 | b = tf.Variable(tf.random_normal(shape=[1,1])) 97 | 98 | # Declare model operations 99 | model_output = tf.add(tf.matmul(x_data, A), b) 100 | 101 | # Declare loss function (Cross Entropy loss) 102 | loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target)) 103 | 104 | # Declare optimizer 105 | my_opt = tf.train.GradientDescentOptimizer(0.01) 106 | train_step = my_opt.minimize(loss) 107 | 108 | ### 109 | # Train model 110 | ### 111 |
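# (note: the prediction and accuracy ops are declared before the training loop
#  so that train/test accuracy can be recorded at every generation)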
112 | # Initialize variables 113 | init = tf.global_variables_initializer() 114 | sess.run(init) 115 | 116 | # Actual Prediction 117 | prediction = tf.round(tf.sigmoid(model_output)) 118 | predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32) 119 | accuracy = tf.reduce_mean(predictions_correct) 120 | 121 | # Training loop 122 | loss_vec = [] 123 | train_acc = [] 124 | test_acc = [] 125 | for i in range(1500): 126 | rand_index = np.random.choice(len(x_vals_train), size=batch_size) 127 | rand_x = x_vals_train[rand_index] 128 | rand_y = np.transpose([y_vals_train[rand_index]]) 129 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 130 | 131 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 132 | loss_vec.append(temp_loss) 133 | temp_acc_train = sess.run(accuracy, feed_dict={x_data: x_vals_train, y_target: np.transpose([y_vals_train])}) 134 | train_acc.append(temp_acc_train) 135 | temp_acc_test = sess.run(accuracy, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])}) 136 | test_acc.append(temp_acc_test) 137 | if (i+1)%300==0: 138 | print('Loss = ' + str(temp_loss)) 139 | 140 | 141 | ### 142 | # Display model performance 143 | ### 144 | 145 | # Plot loss over time 146 | plt.plot(loss_vec, 'k-') 147 | plt.title('Cross Entropy Loss per Generation') 148 | plt.xlabel('Generation') 149 | plt.ylabel('Cross Entropy Loss') 150 | plt.show() 151 | 152 | # Plot train and test accuracy 153 | plt.plot(train_acc, 'k-', label='Train Set Accuracy') 154 | plt.plot(test_acc, 'r--', label='Test Set Accuracy') 155 | plt.title('Train and Test Accuracy') 156 | plt.xlabel('Generation') 157 | plt.ylabel('Accuracy') 158 | plt.legend(loc='lower right') 159 | plt.show() 160 | 161 | -------------------------------------------------------------------------------- /03_Linear_Regression/images/01_Inverse_Matrix_Method.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/01_Inverse_Matrix_Method.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/02_Cholesky_Decomposition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/02_Cholesky_Decomposition.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/03_lin_reg_fit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/03_lin_reg_fit.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/03_lin_reg_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/03_lin_reg_loss.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/04_L1_L2_learningrates.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/04_L1_L2_learningrates.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/04_L1_L2_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/04_L1_L2_loss.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/04_L1_L2_loss2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/04_L1_L2_loss2.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/05_demming_reg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/05_demming_reg.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/05_demming_vs_linear_reg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/05_demming_vs_linear_reg.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/07_elasticnet_reg_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/07_elasticnet_reg_loss.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/08_logistic_reg_acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/08_logistic_reg_acc.png -------------------------------------------------------------------------------- /03_Linear_Regression/images/08_logistic_reg_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/03_Linear_Regression/images/08_logistic_reg_loss.png -------------------------------------------------------------------------------- /04_Support_Vector_Machines/02_Working_with_Linear_SVMs/02_linear_svm.py: -------------------------------------------------------------------------------- 1 | # Linear Support Vector Machine: Soft Margin 2 | # ---------------------------------- 3 | # 4 | # This function shows how to use TensorFlow to 5 | # create a soft margin SVM 6 | # 7 | # We will use the iris data, specifically: 8 | # x1 = Sepal Length 9 | # x2 = Petal Width 10 
| # Class 1 : I. setosa 11 | # Class -1: not I. setosa 12 | # 13 | # We know here that x and y are linearly separable 14 | # for I. setosa classification. 15 | 16 | import matplotlib.pyplot as plt 17 | import numpy as np 18 | import tensorflow as tf 19 | from sklearn import datasets 20 | from tensorflow.python.framework import ops 21 | ops.reset_default_graph() 22 | 23 | # Set random seeds 24 | np.random.seed(7) 25 | tf.set_random_seed(7) 26 | 27 | # Create graph 28 | sess = tf.Session() 29 | 30 | # Load the data 31 | # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)] 32 | iris = datasets.load_iris() 33 | x_vals = np.array([[x[0], x[3]] for x in iris.data]) 34 | y_vals = np.array([1 if y == 0 else -1 for y in iris.target]) 35 | 36 | # Split data into train/test sets 37 | train_indices = np.random.choice(len(x_vals), 38 | round(len(x_vals)*0.9), 39 | replace=False) 40 | test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices))) 41 | x_vals_train = x_vals[train_indices] 42 | x_vals_test = x_vals[test_indices] 43 | y_vals_train = y_vals[train_indices] 44 | y_vals_test = y_vals[test_indices] 45 | 46 | # Declare batch size 47 | batch_size = 135 48 | 49 | # Initialize placeholders 50 | x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32) 51 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 52 | 53 | # Create variables for the linear SVM 54 | A = tf.Variable(tf.random_normal(shape=[2, 1])) 55 | b = tf.Variable(tf.random_normal(shape=[1, 1])) 56 | 57 | # Declare model operations 58 | model_output = tf.subtract(tf.matmul(x_data, A), b) 59 | 60 | # Declare vector L2 'norm' function squared 61 | l2_norm = tf.reduce_sum(tf.square(A)) 62 | 63 | # Declare loss function 64 | # Loss = max(0, 1-pred*actual) + alpha * L2_norm(A)^2 65 | # L2 regularization parameter, alpha 66 | alpha = tf.constant([0.01]) 67 | # Margin term in loss 68 | classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target)))) 69 | # Put terms together 70 | loss = tf.add(classification_term, tf.multiply(alpha, l2_norm)) 71 | 72 | # Declare prediction function 73 | prediction = tf.sign(model_output) 74 | accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32)) 75 | 76 | # Declare optimizer 77 | my_opt = tf.train.GradientDescentOptimizer(0.01) 78 | train_step = my_opt.minimize(loss) 79 | 80 | # Initialize variables 81 | init = tf.global_variables_initializer() 82 | sess.run(init) 83 | 84 | # Training loop 85 | loss_vec = [] 86 | train_accuracy = [] 87 | test_accuracy = [] 88 | for i in range(500): 89 | rand_index = np.random.choice(len(x_vals_train), size=batch_size) 90 | rand_x = x_vals_train[rand_index] 91 | rand_y = np.transpose([y_vals_train[rand_index]]) 92 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 93 | 94 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 95 | loss_vec.append(temp_loss) 96 | 97 | train_acc_temp = sess.run(accuracy, feed_dict={ 98 | x_data: x_vals_train, 99 | y_target: np.transpose([y_vals_train])}) 100 | train_accuracy.append(train_acc_temp) 101 | 102 | test_acc_temp = sess.run(accuracy, feed_dict={ 103 | x_data: x_vals_test, 104 | y_target: np.transpose([y_vals_test])}) 105 | test_accuracy.append(test_acc_temp) 106 | 107 | if (i + 1) % 100 == 0: 108 | print('Step #{} A = {}, b = {}'.format( 109 | str(i+1), 110 | str(sess.run(A)), 111 | str(sess.run(b)) 112 | )) 113 | print('Loss = ' + str(temp_loss)) 114 | 115 | # Extract coefficients 116 |
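# (Editor's note: the decision boundary is a1*x1 + a2*x2 - b = 0 with
#  x1 = sepal length and x2 = petal width; solving for x1 gives the plotted
#  line x1 = (-a2/a1)*x2 + b/a1, hence the slope and y_intercept below.)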
[[a1], [a2]] = sess.run(A) 117 | [[b]] = sess.run(b) 118 | slope = -a2/a1 119 | y_intercept = b/a1 120 | 121 | # Extract x1 and x2 vals 122 | x1_vals = [d[1] for d in x_vals] 123 | 124 | # Get best fit line 125 | best_fit = [] 126 | for i in x1_vals: 127 | best_fit.append(slope*i+y_intercept) 128 | 129 | # Separate I. setosa 130 | setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == 1] 131 | setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == 1] 132 | not_setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == -1] 133 | not_setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == -1] 134 | 135 | # Plot data and line 136 | plt.plot(setosa_x, setosa_y, 'o', label='I. setosa') 137 | plt.plot(not_setosa_x, not_setosa_y, 'x', label='Non-setosa') 138 | plt.plot(x1_vals, best_fit, 'r-', label='Linear Separator', linewidth=3) 139 | plt.ylim([0, 10]) 140 | plt.legend(loc='lower right') 141 | plt.title('Sepal Length vs Petal Width') 142 | plt.xlabel('Petal Width') 143 | plt.ylabel('Sepal Length') 144 | plt.show() 145 | 146 | # Plot train/test accuracies 147 | plt.plot(train_accuracy, 'k-', label='Training Accuracy') 148 | plt.plot(test_accuracy, 'r--', label='Test Accuracy') 149 | plt.title('Train and Test Set Accuracies') 150 | plt.xlabel('Generation') 151 | plt.ylabel('Accuracy') 152 | plt.legend(loc='lower right') 153 | plt.show() 154 | 155 | # Plot loss over time 156 | plt.plot(loss_vec, 'k-') 157 | plt.title('Loss per Generation') 158 | plt.xlabel('Generation') 159 | plt.ylabel('Loss') 160 | plt.show() 161 | -------------------------------------------------------------------------------- /04_Support_Vector_Machines/03_Reduction_to_Linear_Regression/03_support_vector_regression.py: -------------------------------------------------------------------------------- 1 | # SVM Regression 2 | #---------------------------------- 3 | # 4 | # This function shows how to use TensorFlow to 5 | # solve support vector regression.
We are going 6 | # to find the line that has the maximum margin 7 | # which includes as many points as possible 8 | # 9 | # We will use the iris data, specifically: 10 | # y = Sepal Length 11 | # x = Petal Width 12 | 13 | import matplotlib.pyplot as plt 14 | import numpy as np 15 | import tensorflow as tf 16 | from sklearn import datasets 17 | from tensorflow.python.framework import ops 18 | ops.reset_default_graph() 19 | 20 | # Create graph 21 | sess = tf.Session() 22 | 23 | # Load the data 24 | # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)] 25 | iris = datasets.load_iris() 26 | x_vals = np.array([x[3] for x in iris.data]) 27 | y_vals = np.array([y[0] for y in iris.data]) 28 | 29 | # Split data into train/test sets 30 | train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False) 31 | test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices))) 32 | x_vals_train = x_vals[train_indices] 33 | x_vals_test = x_vals[test_indices] 34 | y_vals_train = y_vals[train_indices] 35 | y_vals_test = y_vals[test_indices] 36 | 37 | # Declare batch size 38 | batch_size = 50 39 | 40 | # Initialize placeholders 41 | x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 42 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 43 | 44 | # Create variables for linear regression 45 | A = tf.Variable(tf.random_normal(shape=[1,1])) 46 | b = tf.Variable(tf.random_normal(shape=[1,1])) 47 | 48 | # Declare model operations 49 | model_output = tf.add(tf.matmul(x_data, A), b) 50 | 51 | # Declare loss function 52 | # = max(0, abs(target - predicted) - epsilon) 53 | # 1/2 margin width parameter = epsilon 54 | epsilon = tf.constant([0.5]) 55 | # Margin term in loss 56 | loss = tf.reduce_mean(tf.maximum(0., tf.subtract(tf.abs(tf.subtract(model_output, y_target)), epsilon))) 57 | 58 | # Declare optimizer 59 | my_opt = tf.train.GradientDescentOptimizer(0.075) 60 | train_step = my_opt.minimize(loss) 61 | 62 | # Initialize variables 63 | init = tf.global_variables_initializer() 64 | sess.run(init) 65 | 66 | # Training loop 67 | train_loss = [] 68 | test_loss = [] 69 | for i in range(200): 70 | rand_index = np.random.choice(len(x_vals_train), size=batch_size) 71 | rand_x = np.transpose([x_vals_train[rand_index]]) 72 | rand_y = np.transpose([y_vals_train[rand_index]]) 73 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 74 | 75 | temp_train_loss = sess.run(loss, feed_dict={x_data: np.transpose([x_vals_train]), y_target: np.transpose([y_vals_train])}) 76 | train_loss.append(temp_train_loss) 77 | 78 | temp_test_loss = sess.run(loss, feed_dict={x_data: np.transpose([x_vals_test]), y_target: np.transpose([y_vals_test])}) 79 | test_loss.append(temp_test_loss) 80 | if (i+1)%50==0: 81 | print('-----------') 82 | print('Generation: ' + str(i+1)) 83 | print('A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b))) 84 | print('Train Loss = ' + str(temp_train_loss)) 85 | print('Test Loss = ' + str(temp_test_loss)) 86 | 87 | # Extract Coefficients 88 | [[slope]] = sess.run(A) 89 | [[y_intercept]] = sess.run(b) 90 | [width] = sess.run(epsilon) 91 | 92 | # Get best fit line 93 | best_fit = [] 94 | best_fit_upper = [] 95 | best_fit_lower = [] 96 | for i in x_vals: 97 | best_fit.append(slope*i+y_intercept) 98 | best_fit_upper.append(slope*i+y_intercept+width) 99 | best_fit_lower.append(slope*i+y_intercept-width) 100 | 101 | # Plot fit with data 102 | plt.plot(x_vals, y_vals, 'o', label='Data Points') 103 | plt.plot(x_vals, best_fit, 'r-', label='SVM
Regression Line', linewidth=3) 104 | plt.plot(x_vals, best_fit_upper, 'r--', linewidth=2) 105 | plt.plot(x_vals, best_fit_lower, 'r--', linewidth=2) 106 | plt.ylim([0, 10]) 107 | plt.legend(loc='lower right') 108 | plt.title('Sepal Length vs Petal Width') 109 | plt.xlabel('Petal Width') 110 | plt.ylabel('Sepal Length') 111 | plt.show() 112 | 113 | # Plot loss over time 114 | plt.plot(train_loss, 'k-', label='Train Set Loss') 115 | plt.plot(test_loss, 'r--', label='Test Set Loss') 116 | plt.title('SVM Regression Loss per Generation') 117 | plt.xlabel('Generation') 118 | plt.ylabel('Loss') 119 | plt.legend(loc='upper right') 120 | plt.show() 121 | -------------------------------------------------------------------------------- /04_Support_Vector_Machines/05_Implementing_Nonlinear_SVMs/05_nonlinear_svm.py: -------------------------------------------------------------------------------- 1 | # Nonlinear SVM Example 2 | # 3 | # This function will illustrate how to 4 | # implement the Gaussian kernel on 5 | # the iris dataset. 6 | # 7 | # Gaussian Kernel: 8 | # K(x1, x2) = exp(-gamma * abs(x1 - x2)^2) 9 | 10 | import matplotlib.pyplot as plt 11 | import numpy as np 12 | import tensorflow as tf 13 | from sklearn import datasets 14 | from tensorflow.python.framework import ops 15 | ops.reset_default_graph() 16 | 17 | # Create graph 18 | sess = tf.Session() 19 | 20 | # Load the data 21 | # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)] 22 | iris = datasets.load_iris() 23 | x_vals = np.array([[x[0], x[3]] for x in iris.data]) 24 | y_vals = np.array([1 if y == 0 else -1 for y in iris.target]) 25 | class1_x = [x[0] for i, x in enumerate(x_vals) if y_vals[i] == 1] 26 | class1_y = [x[1] for i, x in enumerate(x_vals) if y_vals[i] == 1] 27 | class2_x = [x[0] for i, x in enumerate(x_vals) if y_vals[i] == -1] 28 | class2_y = [x[1] for i, x in enumerate(x_vals) if y_vals[i] == -1] 29 | 30 | # Declare batch size 31 | batch_size = 150 32 | 33 | # Initialize placeholders 34 | x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32) 35 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 36 | prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32) 37 | 38 | # Create variables for SVM 39 | b = tf.Variable(tf.random_normal(shape=[1, batch_size])) 40 | 41 | # Gaussian (RBF) kernel 42 | gamma = tf.constant(-25.0) 43 | sq_dists = tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data))) 44 | my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists))) 45 | 46 | # Compute SVM Model 47 | first_term = tf.reduce_sum(b) 48 | b_vec_cross = tf.matmul(tf.transpose(b), b) 49 | y_target_cross = tf.matmul(y_target, tf.transpose(y_target)) 50 | second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross))) 51 | loss = tf.negative(tf.subtract(first_term, second_term)) 52 | 53 | # Gaussian (RBF) prediction kernel 54 | rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1]) 55 | rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1]) 56 | pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB)) 57 | pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist))) 58 | 59 | prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target), b), pred_kernel) 60 | prediction = tf.sign(prediction_output - tf.reduce_mean(prediction_output)) 61 | accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.squeeze(prediction), tf.squeeze(y_target)), tf.float32)) 62 | 63 | # Declare optimizer 64 | my_opt =
tf.train.GradientDescentOptimizer(0.01) 65 | train_step = my_opt.minimize(loss) 66 | 67 | # Initialize variables 68 | init = tf.global_variables_initializer() 69 | sess.run(init) 70 | 71 | # Training loop 72 | loss_vec = [] 73 | batch_accuracy = [] 74 | for i in range(300): 75 | rand_index = np.random.choice(len(x_vals), size=batch_size) 76 | rand_x = x_vals[rand_index] 77 | rand_y = np.transpose([y_vals[rand_index]]) 78 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 79 | 80 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 81 | loss_vec.append(temp_loss) 82 | 83 | acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x, 84 | y_target: rand_y, 85 | prediction_grid: rand_x}) 86 | batch_accuracy.append(acc_temp) 87 | 88 | if (i + 1) % 75 == 0: 89 | print('Step #' + str(i + 1)) 90 | print('Loss = ' + str(temp_loss)) 91 | 92 | # Create a mesh to plot points in 93 | x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1 94 | y_min, y_max = x_vals[:, 1].min() - 1, x_vals[:, 1].max() + 1 95 | xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), 96 | np.arange(y_min, y_max, 0.02)) 97 | grid_points = np.c_[xx.ravel(), yy.ravel()] 98 | [grid_predictions] = sess.run(prediction, feed_dict={x_data: x_vals, 99 | y_target: np.transpose([y_vals]), 100 | prediction_grid: grid_points}) 101 | grid_predictions = grid_predictions.reshape(xx.shape) 102 | 103 | # Plot points and grid 104 | plt.contourf(xx, yy, grid_predictions, cmap=plt.cm.Paired, alpha=0.8) 105 | plt.plot(class1_x, class1_y, 'ro', label='I. setosa') 106 | plt.plot(class2_x, class2_y, 'kx', label='Non-setosa') 107 | plt.title('Gaussian SVM Results on Iris Data') 108 | plt.xlabel('Sepal Length') 109 | plt.ylabel('Petal Width') 110 | plt.legend(loc='lower right') 111 | plt.ylim([-0.5, 3.0]) 112 | plt.xlim([3.5, 8.5]) 113 | plt.show() 114 | 115 | # Plot batch accuracy 116 | plt.plot(batch_accuracy, 'k-', label='Accuracy') 117 | plt.title('Batch Accuracy') 118 | plt.xlabel('Generation') 119 | plt.ylabel('Accuracy') 120 | plt.legend(loc='lower right') 121 | plt.show() 122 | 123 | # Plot loss over time 124 | plt.plot(loss_vec, 'k-') 125 | plt.title('Loss per Generation') 126 | plt.xlabel('Generation') 127 | plt.ylabel('Loss') 128 | plt.show() 129 | -------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/01_introduction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/01_introduction.png -------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/02_linear_svm_accuracy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/02_linear_svm_accuracy.png -------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/02_linear_svm_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/02_linear_svm_loss.png
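An editor's aside on the kernel computation in 05_nonlinear_svm.py above: both the training and prediction kernels rely on the identity ||x - z||^2 = ||x||^2 - 2*x.z + ||z||^2 to obtain all pairwise squared distances in a single matrix expression. A minimal NumPy sketch (illustrative only; array sizes and the seed are made up) checking that the vectorized form matches a brute-force double loop:

import numpy as np

np.random.seed(0)
X = np.random.rand(5, 2)   # stands in for x_data
Z = np.random.rand(7, 2)   # stands in for prediction_grid
gamma = 25.0               # positive here; the recipe folds the sign into its constant (gamma = -25.0)

rA = np.sum(X**2, axis=1).reshape(-1, 1)   # ||x_i||^2 as a column vector
rB = np.sum(Z**2, axis=1).reshape(1, -1)   # ||z_j||^2 as a row vector
sq_dists = rA - 2.0 * X.dot(Z.T) + rB      # all pairwise ||x_i - z_j||^2 at once
kernel = np.exp(-gamma * sq_dists)         # Gaussian (RBF) kernel matrix

# Brute-force reference: one kernel entry per (x, z) pair
brute_force = np.array([[np.exp(-gamma * np.sum((x - z)**2)) for z in Z] for x in X])
assert np.allclose(kernel, brute_force)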
-------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/02_linear_svm_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/02_linear_svm_output.png -------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/03_linear_svm_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/03_linear_svm_loss.png -------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/03_svm_regression_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/03_svm_regression_output.png -------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/04_linear_svm_gaussian.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/04_linear_svm_gaussian.png -------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/04_nonlinear_data_linear_kernel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/04_nonlinear_data_linear_kernel.png -------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/05_non_linear_svms.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/05_non_linear_svms.png -------------------------------------------------------------------------------- /04_Support_Vector_Machines/images/06_multiclass_svm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/04_Support_Vector_Machines/images/06_multiclass_svm.png -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/02_Working_with_Nearest_Neighbors/02_nearest_neighbor.py: -------------------------------------------------------------------------------- 1 | # k-Nearest Neighbor 2 | #---------------------------------- 3 | # 4 | # This function illustrates how to use 5 | # k-nearest neighbors in tensorflow 6 | # 7 | # We will use the 1970s Boston housing dataset 8 | # which is available through the UCI 9 | # ML data repository. 10 | # 11 | # Data: 12 | #----------x-values----------- 13 | # CRIM : per capita crime rate by town 14 | # ZN : prop. 
of res. land zones 15 | # INDUS : prop. of non-retail business acres 16 | # CHAS : Charles river dummy variable 17 | # NOX : nitric oxides concentration / 10 M 18 | # RM : Avg. # of rooms per dwelling 19 | # AGE : prop. of buildings built prior to 1940 20 | # DIS : Weighted distances to employment centers 21 | # RAD : Index of radial highway access 22 | # TAX : Full tax rate value per $10k 23 | # PTRATIO: Pupil/Teacher ratio by town 24 | # B : 1000*(Bk-0.63)^2, Bk=prop. of blacks 25 | # LSTAT : % lower status of pop 26 | #------------y-value----------- 27 | # MEDV : Median Value of homes in $1,000's 28 | 29 | import matplotlib.pyplot as plt 30 | import numpy as np 31 | import tensorflow as tf 32 | import requests 33 | from tensorflow.python.framework import ops 34 | ops.reset_default_graph() 35 | 36 | # Create graph 37 | sess = tf.Session() 38 | 39 | # Load the data 40 | housing_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data' 41 | housing_header = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV'] 42 | cols_used = ['CRIM', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'TAX', 'PTRATIO', 'B', 'LSTAT'] 43 | num_features = len(cols_used) 44 | housing_file = requests.get(housing_url) 45 | housing_data = [[float(x) for x in y.split(' ') if len(x)>=1] for y in housing_file.text.split('\n') if len(y)>=1] 46 | 47 | y_vals = np.transpose([np.array([y[13] for y in housing_data])]) 48 | x_vals = np.array([[x for i,x in enumerate(y) if housing_header[i] in cols_used] for y in housing_data]) 49 | 50 | ## Min-Max Scaling 51 | x_vals = (x_vals - x_vals.min(0)) / x_vals.ptp(0) 52 | 53 | # Split the data into train and test sets 54 | np.random.seed(13) # make results reproducible 55 | train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False) 56 | test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices))) 57 | x_vals_train = x_vals[train_indices] 58 | x_vals_test = x_vals[test_indices] 59 | y_vals_train = y_vals[train_indices] 60 | y_vals_test = y_vals[test_indices] 61 | 62 | # Declare k-value and batch size 63 | k = 4 64 | batch_size = len(x_vals_test) 65 | 66 | # Placeholders 67 | x_data_train = tf.placeholder(shape=[None, num_features], dtype=tf.float32) 68 | x_data_test = tf.placeholder(shape=[None, num_features], dtype=tf.float32) 69 | y_target_train = tf.placeholder(shape=[None, 1], dtype=tf.float32) 70 | y_target_test = tf.placeholder(shape=[None, 1], dtype=tf.float32) 71 | 72 | # Declare distance metric 73 | # L1 74 | distance = tf.reduce_sum(tf.abs(tf.subtract(x_data_train, tf.expand_dims(x_data_test,1))), axis=2) 75 | 76 | # L2 77 | #distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x_data_train, tf.expand_dims(x_data_test,1))), axis=2)) 78 | 79 | # Predict: Get min distance index (Nearest neighbor) 80 | #prediction = tf.arg_min(distance, 0) 81 | top_k_xvals, top_k_indices = tf.nn.top_k(tf.negative(distance), k=k) 82 | x_sums = tf.expand_dims(tf.reduce_sum(top_k_xvals, 1),1) 83 | x_sums_repeated = tf.matmul(x_sums,tf.ones([1, k], tf.float32)) 84 | x_val_weights = tf.expand_dims(tf.div(top_k_xvals,x_sums_repeated), 1) 85 | 86 | top_k_yvals = tf.gather(y_target_train, top_k_indices) 87 | prediction = tf.squeeze(tf.matmul(x_val_weights,top_k_yvals), axis=[1]) 88 | 89 | # Calculate MSE 90 | mse = tf.div(tf.reduce_sum(tf.square(tf.subtract(prediction, y_target_test))), batch_size) 91 | 92 | # Calculate how many loops over the test data 93 | num_loops = 
int(np.ceil(len(x_vals_test)/batch_size)) 94 | 95 | for i in range(num_loops): 96 | min_index = i*batch_size 97 | max_index = min((i+1)*batch_size,len(x_vals_test)) 98 | x_batch = x_vals_test[min_index:max_index] 99 | y_batch = y_vals_test[min_index:max_index] 100 | predictions = sess.run(prediction, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch, 101 | y_target_train: y_vals_train, y_target_test: y_batch}) 102 | batch_mse = sess.run(mse, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch, 103 | y_target_train: y_vals_train, y_target_test: y_batch}) 104 | 105 | print('Batch #' + str(i+1) + ' MSE: ' + str(np.round(batch_mse,3))) 106 | 107 | # Plot prediction and actual distribution 108 | bins = np.linspace(5, 50, 45) 109 | 110 | plt.hist(predictions, bins, alpha=0.5, label='Prediction') 111 | plt.hist(y_batch, bins, alpha=0.5, label='Actual') 112 | plt.title('Histogram of Predicted and Actual Values') 113 | plt.xlabel('Med Home Value in $1,000s') 114 | plt.ylabel('Frequency') 115 | plt.legend(loc='upper right') 116 | plt.show() 117 | 118 | -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/03_Working_with_Text_Distances/03_text_distances.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Text Distances\n", 8 | "\n", 9 | "\n", 10 | "This notebook illustrates how to use the Levenshtein distance (edit distance) in TensorFlow.\n", 11 | "\n", 12 | "Load the required library and start a TensorFlow session." 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 1, 18 | "metadata": { 19 | "collapsed": true 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "import tensorflow as tf\n", 24 | "\n", 25 | "sess = tf.Session()" 26 | ] 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "metadata": {}, 31 | "source": [ 32 | "### First compute the edit distance between 'bear' and 'beers'" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 2, 38 | "metadata": { 39 | "collapsed": false 40 | }, 41 | "outputs": [ 42 | { 43 | "name": "stdout", 44 | "output_type": "stream", 45 | "text": [ 46 | "[[ 2.]]\n" 47 | ] 48 | } 49 | ], 50 | "source": [ 51 | "hypothesis = list('bear')\n", 52 | "truth = list('beers')\n", 53 | "h1 = tf.SparseTensor([[0,0,0], [0,0,1], [0,0,2], [0,0,3]],\n", 54 | " hypothesis,\n", 55 | " [1,1,1])\n", 56 | "\n", 57 | "t1 = tf.SparseTensor([[0,0,0], [0,0,1], [0,0,2], [0,0,3],[0,0,4]],\n", 58 | " truth,\n", 59 | " [1,1,1])\n", 60 | "\n", 61 | "print(sess.run(tf.edit_distance(h1, t1, normalize=False)))" 62 | ] 63 | }, 64 | { 65 | "cell_type": "markdown", 66 | "metadata": {}, 67 | "source": [ 68 | "### Compute the edit distance between ('bear','beer') and 'beers':" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 3, 74 | "metadata": { 75 | "collapsed": false 76 | }, 77 | "outputs": [ 78 | { 79 | "name": "stdout", 80 | "output_type": "stream", 81 | "text": [ 82 | "[[ 0.40000001 0.2 ]]\n" 83 | ] 84 | } 85 | ], 86 | "source": [ 87 | "hypothesis2 = list('bearbeer')\n", 88 | "truth2 = list('beersbeers')\n", 89 | "h2 = tf.SparseTensor([[0,0,0], [0,0,1], [0,0,2], [0,0,3], [0,1,0], [0,1,1], [0,1,2], [0,1,3]],\n", 90 | " hypothesis2,\n", 91 | " [1,2,4])\n", 92 | "\n", 93 | "t2 = tf.SparseTensor([[0,0,0], [0,0,1], [0,0,2], [0,0,3], [0,0,4], [0,1,0], [0,1,1], [0,1,2], [0,1,3], [0,1,4]],\n", 94 | " truth2,\n", 95 | " [1,2,5])\n", 96 | "\n", 97 
| "print(sess.run(tf.edit_distance(h2, t2, normalize=True)))" 98 | ] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | "### Now compute distance between four words and 'beers' more efficiently:" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 4, 110 | "metadata": { 111 | "collapsed": false 112 | }, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "[[ 0.40000001]\n", 119 | " [ 0.60000002]\n", 120 | " [ 1. ]\n", 121 | " [ 1. ]]\n" 122 | ] 123 | } 124 | ], 125 | "source": [ 126 | "hypothesis_words = ['bear','bar','tensor','flow']\n", 127 | "truth_word = ['beers']\n", 128 | "\n", 129 | "num_h_words = len(hypothesis_words)\n", 130 | "h_indices = [[xi, 0, yi] for xi,x in enumerate(hypothesis_words) for yi,y in enumerate(x)]\n", 131 | "h_chars = list(''.join(hypothesis_words))\n", 132 | "\n", 133 | "h3 = tf.SparseTensor(h_indices, h_chars, [num_h_words,1,1])\n", 134 | "\n", 135 | "truth_word_vec = truth_word*num_h_words\n", 136 | "t_indices = [[xi, 0, yi] for xi,x in enumerate(truth_word_vec) for yi,y in enumerate(x)]\n", 137 | "t_chars = list(''.join(truth_word_vec))\n", 138 | "\n", 139 | "t3 = tf.SparseTensor(t_indices, t_chars, [num_h_words,1,1])\n", 140 | "\n", 141 | "print(sess.run(tf.edit_distance(h3, t3, normalize=True)))" 142 | ] 143 | } 144 | ], 145 | "metadata": { 146 | "anaconda-cloud": {}, 147 | "kernelspec": { 148 | "display_name": "Python [conda env:tf-cpu]", 149 | "language": "python", 150 | "name": "conda-env-tf-cpu-py" 151 | }, 152 | "language_info": { 153 | "codemirror_mode": { 154 | "name": "ipython", 155 | "version": 3 156 | }, 157 | "file_extension": ".py", 158 | "mimetype": "text/x-python", 159 | "name": "python", 160 | "nbconvert_exporter": "python", 161 | "pygments_lexer": "ipython3", 162 | "version": "3.5.2" 163 | } 164 | }, 165 | "nbformat": 4, 166 | "nbformat_minor": 2 167 | } 168 | -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/03_Working_with_Text_Distances/03_text_distances.py: -------------------------------------------------------------------------------- 1 | # Text Distances 2 | #---------------------------------- 3 | # 4 | # This function illustrates how to use 5 | # the Levenshtein distance (edit distance) 6 | # in TensorFlow. 
7 | 8 | import tensorflow as tf 9 | 10 | sess = tf.Session() 11 | 12 | #---------------------------------- 13 | # First compute the edit distance between 'bear' and 'beers' 14 | hypothesis = list('bear') 15 | truth = list('beers') 16 | h1 = tf.SparseTensor([[0,0,0], [0,0,1], [0,0,2], [0,0,3]], 17 | hypothesis, 18 | [1,1,1]) 19 | 20 | t1 = tf.SparseTensor([[0,0,0], [0,0,1], [0,0,2], [0,0,3],[0,0,4]], 21 | truth, 22 | [1,1,1]) 23 | 24 | print(sess.run(tf.edit_distance(h1, t1, normalize=False))) 25 | 26 | #---------------------------------- 27 | # Compute the edit distance between ('bear','beer') and 'beers': 28 | hypothesis2 = list('bearbeer') 29 | truth2 = list('beersbeers') 30 | h2 = tf.SparseTensor([[0,0,0], [0,0,1], [0,0,2], [0,0,3], [0,1,0], [0,1,1], [0,1,2], [0,1,3]], 31 | hypothesis2, 32 | [1,2,4]) 33 | 34 | t2 = tf.SparseTensor([[0,0,0], [0,0,1], [0,0,2], [0,0,3], [0,0,4], [0,1,0], [0,1,1], [0,1,2], [0,1,3], [0,1,4]], 35 | truth2, 36 | [1,2,5]) 37 | 38 | print(sess.run(tf.edit_distance(h2, t2, normalize=True))) 39 | 40 | #---------------------------------- 41 | # Now compute distance between four words and 'beers' more efficiently with sparse tensors: 42 | hypothesis_words = ['bear','bar','tensor','flow'] 43 | truth_word = ['beers'] 44 | 45 | num_h_words = len(hypothesis_words) 46 | h_indices = [[xi, 0, yi] for xi,x in enumerate(hypothesis_words) for yi,y in enumerate(x)] 47 | h_chars = list(''.join(hypothesis_words)) 48 | 49 | h3 = tf.SparseTensor(h_indices, h_chars, [num_h_words,1,1]) 50 | 51 | truth_word_vec = truth_word*num_h_words 52 | t_indices = [[xi, 0, yi] for xi,x in enumerate(truth_word_vec) for yi,y in enumerate(x)] 53 | t_chars = list(''.join(truth_word_vec)) 54 | 55 | t3 = tf.SparseTensor(t_indices, t_chars, [num_h_words,1,1]) 56 | 57 | print(sess.run(tf.edit_distance(h3, t3, normalize=True))) 58 | -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/04_Computing_with_Mixed_Distance_Functions/04_mixed_distance_functions_knn.py: -------------------------------------------------------------------------------- 1 | # Mixed Distance Functions for k-Nearest Neighbor 2 | #---------------------------------- 3 | # 4 | # This function shows how to use different distance 5 | # metrics on different features for kNN. 6 | # 7 | # Data: 8 | #----------x-values----------- 9 | # CRIM : per capita crime rate by town 10 | # ZN : prop. of res. land zones 11 | # INDUS : prop. of non-retail business acres 12 | # CHAS : Charles river dummy variable 13 | # NOX : nitric oxides concentration / 10 M 14 | # RM : Avg. # of rooms per dwelling 15 | # AGE : prop. of buildings built prior to 1940 16 | # DIS : Weighted distances to employment centers 17 | # RAD : Index of radial highway access 18 | # TAX : Full tax rate value per $10k 19 | # PTRATIO: Pupil/Teacher ratio by town 20 | # B : 1000*(Bk-0.63)^2, Bk=prop. 
of blacks 21 | # LSTAT : % lower status of pop 22 | #------------y-value----------- 23 | # MEDV : Median Value of homes in $1,000's 24 | 25 | 26 | import matplotlib.pyplot as plt 27 | import numpy as np 28 | import tensorflow as tf 29 | import requests 30 | from tensorflow.python.framework import ops 31 | ops.reset_default_graph() 32 | 33 | # Create graph 34 | sess = tf.Session() 35 | 36 | # Load the data 37 | housing_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data' 38 | housing_header = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV'] 39 | cols_used = ['CRIM', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'TAX', 'PTRATIO', 'B', 'LSTAT'] 40 | num_features = len(cols_used) 41 | housing_file = requests.get(housing_url) 42 | housing_data = [[float(x) for x in y.split(' ') if len(x)>=1] for y in housing_file.text.split('\n') if len(y)>=1] 43 | 44 | y_vals = np.transpose([np.array([y[13] for y in housing_data])]) 45 | x_vals = np.array([[x for i,x in enumerate(y) if housing_header[i] in cols_used] for y in housing_data]) 46 | 47 | ## Min-Max Scaling 48 | x_vals = (x_vals - x_vals.min(0)) / x_vals.ptp(0) 49 | 50 | ## Create distance metric weight matrix weighted by standard deviation 51 | weight_diagonal = x_vals.std(0) 52 | weight_matrix = tf.cast(tf.diag(weight_diagonal), dtype=tf.float32) 53 | 54 | # Split the data into train and test sets 55 | np.random.seed(13) # reproducible results 56 | train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False) 57 | test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices))) 58 | x_vals_train = x_vals[train_indices] 59 | x_vals_test = x_vals[test_indices] 60 | y_vals_train = y_vals[train_indices] 61 | y_vals_test = y_vals[test_indices] 62 | 63 | # Declare k-value and batch size 64 | k = 4 65 | batch_size = len(x_vals_test) 66 | 67 | # Placeholders 68 | x_data_train = tf.placeholder(shape=[None, num_features], dtype=tf.float32) 69 | x_data_test = tf.placeholder(shape=[None, num_features], dtype=tf.float32) 70 | y_target_train = tf.placeholder(shape=[None, 1], dtype=tf.float32) 71 | y_target_test = tf.placeholder(shape=[None, 1], dtype=tf.float32) 72 | 73 | # Declare weighted distance metric 74 | # Weighted L2 = sqrt((x-y)^T * A * (x-y)) 75 | subtraction_term = tf.subtract(x_data_train, tf.expand_dims(x_data_test,1)) 76 | first_product = tf.matmul(subtraction_term, tf.tile(tf.expand_dims(weight_matrix,0), [batch_size,1,1])) 77 | second_product = tf.matmul(first_product, tf.transpose(subtraction_term, perm=[0,2,1])) 78 | distance = tf.sqrt(tf.matrix_diag_part(second_product)) 79 | 80 | # Predict: Get min distance index (Nearest neighbor) 81 | top_k_xvals, top_k_indices = tf.nn.top_k(tf.negative(distance), k=k) 82 | x_sums = tf.expand_dims(tf.reduce_sum(top_k_xvals, 1),1) 83 | x_sums_repeated = tf.matmul(x_sums,tf.ones([1, k], tf.float32)) 84 | x_val_weights = tf.expand_dims(tf.div(top_k_xvals,x_sums_repeated), 1) 85 | 86 | top_k_yvals = tf.gather(y_target_train, top_k_indices) 87 | prediction = tf.squeeze(tf.matmul(x_val_weights,top_k_yvals), axis=[1]) 88 | 89 | # Calculate MSE 90 | mse = tf.div(tf.reduce_sum(tf.square(tf.subtract(prediction, y_target_test))), batch_size) 91 | 92 | # Calculate how many loops over the test data 93 | num_loops = int(np.ceil(len(x_vals_test)/batch_size)) 94 | 95 | for i in range(num_loops): 96 | min_index = i*batch_size 97 | max_index = min((i+1)*batch_size,len(x_vals_test)) 98 | x_batch = 
x_vals_test[min_index:max_index] 99 | y_batch = y_vals_test[min_index:max_index] 100 | predictions = sess.run(prediction, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch, 101 | y_target_train: y_vals_train, y_target_test: y_batch}) 102 | batch_mse = sess.run(mse, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch, 103 | y_target_train: y_vals_train, y_target_test: y_batch}) 104 | 105 | print('Batch #' + str(i+1) + ' MSE: ' + str(np.round(batch_mse,3))) 106 | 107 | # Plot prediction and actual distribution 108 | bins = np.linspace(5, 50, 45) 109 | 110 | plt.hist(predictions, bins, alpha=0.5, label='Prediction') 111 | plt.hist(y_batch, bins, alpha=0.5, label='Actual') 112 | plt.title('Histogram of Predicted and Actual Values') 113 | plt.xlabel('Med Home Value in $1,000s') 114 | plt.ylabel('Frequency') 115 | plt.legend(loc='upper right') 116 | plt.show() -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/05_An_Address_Matching_Example/05_address_matching.py: -------------------------------------------------------------------------------- 1 | # Address Matching with k-Nearest Neighbors 2 | #---------------------------------- 3 | # 4 | # This function illustrates a way to perform 5 | # address matching between two data sets. 6 | # 7 | # For each test address, we will return the 8 | # closest reference address to it. 9 | # 10 | # We will consider two distance functions: 11 | # 1) Edit distance for street number/name and 12 | # 2) Euclidean distance (L2) for the zip codes 13 | 14 | import random 15 | import string 16 | import numpy as np 17 | import tensorflow as tf 18 | from tensorflow.python.framework import ops 19 | ops.reset_default_graph() 20 | 21 | # First we generate the data sets we will need 22 | # n = Size of created data sets 23 | n = 10 24 | street_names = ['abbey', 'baker', 'canal', 'donner', 'elm'] 25 | street_types = ['rd', 'st', 'ln', 'pass', 'ave'] 26 | 27 | random.seed(31) # make results reproducible 28 | rand_zips = [random.randint(65000, 65999) for i in range(5)] 29 | 30 | 31 | # Function to randomly create one typo in a string w/ a probability 32 | def create_typo(s, prob=0.75): 33 | if random.uniform(0, 1) < prob: 34 | rand_ind = random.choice(range(len(s))) 35 | s_list = list(s) 36 | s_list[rand_ind] = random.choice(string.ascii_lowercase) 37 | s = ''.join(s_list) 38 | return s 39 | 40 | # Generate the reference dataset 41 | numbers = [random.randint(1, 9999) for _ in range(n)] 42 | streets = [random.choice(street_names) for _ in range(n)] 43 | street_suffs = [random.choice(street_types) for _ in range(n)] 44 | zips = [random.choice(rand_zips) for _ in range(n)] 45 | full_streets = [str(x) + ' ' + y + ' ' + z for x, y, z in zip(numbers, streets, street_suffs)] 46 | reference_data = [list(x) for x in zip(full_streets, zips)] 47 | 48 | # Generate test dataset with some typos 49 | typo_streets = [create_typo(x) for x in streets] 50 | typo_full_streets = [str(x) + ' ' + y + ' ' + z for x, y, z in zip(numbers, typo_streets, street_suffs)] 51 | test_data = [list(x) for x in zip(typo_full_streets, zips)] 52 | 53 | # Now we can perform address matching 54 | # Create graph 55 | sess = tf.Session() 56 | 57 | # Placeholders 58 | test_address = tf.sparse_placeholder(dtype=tf.string) 59 | test_zip = tf.placeholder(shape=[None, 1], dtype=tf.float32) 60 | ref_address = tf.sparse_placeholder(dtype=tf.string) 61 | ref_zip = tf.placeholder(shape=[None, n], dtype=tf.float32) 62 | 63 | # Declare Zip code distance 
for a test zip and reference set 64 | zip_dist = tf.square(tf.subtract(ref_zip, test_zip)) 65 | 66 | # Declare Edit distance for address 67 | address_dist = tf.edit_distance(test_address, ref_address, normalize=True) 68 | 69 | # Create similarity scores 70 | zip_max = tf.gather(tf.squeeze(zip_dist), tf.argmax(zip_dist, 1)) 71 | zip_min = tf.gather(tf.squeeze(zip_dist), tf.argmin(zip_dist, 1)) 72 | zip_sim = tf.div(tf.subtract(zip_max, zip_dist), tf.subtract(zip_max, zip_min)) 73 | address_sim = tf.subtract(1., address_dist) 74 | 75 | # Combine distance functions 76 | address_weight = 0.5 77 | zip_weight = 1. - address_weight 78 | weighted_sim = tf.add(tf.transpose(tf.multiply(address_weight, address_sim)), tf.multiply(zip_weight, zip_sim)) 79 | 80 | # Predict: Get max similarity entry 81 | top_match_index = tf.argmax(weighted_sim, 1) 82 | 83 | 84 | # Function to Create a character-sparse tensor from strings 85 | def sparse_from_word_vec(word_vec): 86 | num_words = len(word_vec) 87 | indices = [[xi, 0, yi] for xi,x in enumerate(word_vec) for yi,y in enumerate(x)] 88 | chars = list(''.join(word_vec)) 89 | return tf.SparseTensorValue(indices, chars, [num_words,1,1]) 90 | 91 | # Loop through test indices 92 | reference_addresses = [x[0] for x in reference_data] 93 | reference_zips = np.array([[x[1] for x in reference_data]]) 94 | 95 | # Create sparse address reference set 96 | sparse_ref_set = sparse_from_word_vec(reference_addresses) 97 | 98 | for i in range(n): 99 | test_address_entry = test_data[i][0] 100 | test_zip_entry = [[test_data[i][1]]] 101 | 102 | # Create sparse address vectors 103 | test_address_repeated = [test_address_entry] * n 104 | sparse_test_set = sparse_from_word_vec(test_address_repeated) 105 | 106 | feeddict = {test_address: sparse_test_set, 107 | test_zip: test_zip_entry, 108 | ref_address: sparse_ref_set, 109 | ref_zip: reference_zips} 110 | best_match = sess.run(top_match_index, feed_dict=feeddict) 111 | best_street = reference_addresses[best_match[0]] 112 | [best_zip] = reference_zips[0][best_match] 113 | [[test_zip_]] = test_zip_entry 114 | print('Address: ' + str(test_address_entry) + ', ' + str(test_zip_)) 115 | print('Match : ' + str(best_street) + ', ' + str(best_zip)) 116 | -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/06_Nearest_Neighbors_for_Image_Recognition/06_image_recognition.py: -------------------------------------------------------------------------------- 1 | # MNIST Digit Prediction with k-Nearest Neighbors 2 | #----------------------------------------------- 3 | # 4 | # This script will load the MNIST data, and split 5 | # it into test/train and perform prediction with 6 | # nearest neighbors 7 | # 8 | # For each test integer, we will return the 9 | # closest image/integer. 
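# Editor's aside, a minimal sketch with made-up values: the label-voting
# step used further below sums the one-hot labels of the k nearest
# neighbors column-wise and takes the class with the most votes.
import numpy as np
neighbor_labels = np.array([[0, 1, 0], [0, 1, 0], [1, 0, 0]])  # k=3 one-hot labels
votes = neighbor_labels.sum(axis=0)   # -> [1, 2, 0]
assert votes.argmax() == 1            # class 1 is the mode prediction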
10 | # 11 | # Integer images are represented as 28x28 matrices 12 | # of floating point numbers 13 | 14 | import random 15 | import numpy as np 16 | import tensorflow as tf 17 | import matplotlib.pyplot as plt 18 | from PIL import Image 19 | from tensorflow.examples.tutorials.mnist import input_data 20 | from tensorflow.python.framework import ops 21 | ops.reset_default_graph() 22 | 23 | # Create graph 24 | sess = tf.Session() 25 | 26 | # Load the data 27 | mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 28 | 29 | # Random sample 30 | np.random.seed(13) # set seed for reproducibility 31 | train_size = 1000 32 | test_size = 102 33 | rand_train_indices = np.random.choice(len(mnist.train.images), train_size, replace=False) 34 | rand_test_indices = np.random.choice(len(mnist.test.images), test_size, replace=False) 35 | x_vals_train = mnist.train.images[rand_train_indices] 36 | x_vals_test = mnist.test.images[rand_test_indices] 37 | y_vals_train = mnist.train.labels[rand_train_indices] 38 | y_vals_test = mnist.test.labels[rand_test_indices] 39 | 40 | # Declare k-value and batch size 41 | k = 4 42 | batch_size = 6 43 | 44 | # Placeholders 45 | x_data_train = tf.placeholder(shape=[None, 784], dtype=tf.float32) 46 | x_data_test = tf.placeholder(shape=[None, 784], dtype=tf.float32) 47 | y_target_train = tf.placeholder(shape=[None, 10], dtype=tf.float32) 48 | y_target_test = tf.placeholder(shape=[None, 10], dtype=tf.float32) 49 | 50 | # Declare distance metric 51 | # L1 52 | distance = tf.reduce_sum(tf.abs(tf.subtract(x_data_train, tf.expand_dims(x_data_test,1))), axis=2) 53 | 54 | # L2 55 | #distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x_data_train, tf.expand_dims(x_data_test,1))), axis=2)) 56 | 57 | # Predict: Get min distance index (Nearest neighbor) 58 | top_k_xvals, top_k_indices = tf.nn.top_k(tf.negative(distance), k=k) 59 | prediction_indices = tf.gather(y_target_train, top_k_indices) 60 | # Predict the mode category 61 | count_of_predictions = tf.reduce_sum(prediction_indices, axis=1) 62 | prediction = tf.argmax(count_of_predictions, axis=1) 63 | 64 | # Calculate how many loops over the test data 65 | num_loops = int(np.ceil(len(x_vals_test)/batch_size)) 66 | 67 | test_output = [] 68 | actual_vals = [] 69 | for i in range(num_loops): 70 | min_index = i*batch_size 71 | max_index = min((i+1)*batch_size,len(x_vals_test)) 72 | x_batch = x_vals_test[min_index:max_index] 73 | y_batch = y_vals_test[min_index:max_index] 74 | predictions = sess.run(prediction, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch, 75 | y_target_train: y_vals_train, y_target_test: y_batch}) 76 | test_output.extend(predictions) 77 | actual_vals.extend(np.argmax(y_batch, axis=1)) 78 | 79 | accuracy = sum([1./test_size for i in range(test_size) if test_output[i]==actual_vals[i]]) 80 | print('Accuracy on test set: ' + str(accuracy)) 81 | 82 | # Plot the last batch results: 83 | actuals = np.argmax(y_batch, axis=1) 84 | 85 | Nrows = 2 86 | Ncols = 3 87 | for i in range(len(actuals)): 88 | plt.subplot(Nrows, Ncols, i+1) 89 | plt.imshow(np.reshape(x_batch[i], [28,28]), cmap='Greys_r') 90 | plt.title('Actual: ' + str(actuals[i]) + ' Pred: ' + str(predictions[i]), 91 | fontsize=10) 92 | frame = plt.gca() 93 | frame.axes.get_xaxis().set_visible(False) 94 | frame.axes.get_yaxis().set_visible(False) 95 | 96 | plt.show() 97 | -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/images/02_mse_vs_variance.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/05_Nearest_Neighbor_Methods/images/02_mse_vs_variance.png -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/images/02_nn_histogram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/05_Nearest_Neighbor_Methods/images/02_nn_histogram.png -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/images/04_pred_vs_actual.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/05_Nearest_Neighbor_Methods/images/04_pred_vs_actual.png -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/images/06_nn_image_recognition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/05_Nearest_Neighbor_Methods/images/06_nn_image_recognition.png -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/images/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/05_Nearest_Neighbor_Methods/images/image.png -------------------------------------------------------------------------------- /05_Nearest_Neighbor_Methods/images/nearest_neighbor_intro.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/05_Nearest_Neighbor_Methods/images/nearest_neighbor_intro.jpg -------------------------------------------------------------------------------- /06_Neural_Networks/01_Introduction/Introduction_to_Neural_Networks.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Introduction to Neural Networks\n", 8 | "\n", 9 | "-------------------\n", 10 | "\n" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "In this chapter, we will introduce neural networks and how to implement them in TensorFlow. Most of the subsequent chapters will be based on neural networks, so learning how to use them in TensorFlow is very important. We will start by introducing basic concepts of neural networking and work up to multilayer networks. 
In the last section we will create a neural network that learns to play Tic Tac Toe.\n", 18 | "\n", 19 | "In this chapter, we'll cover the following recipes:\n", 20 | "\n", 21 | "- Implementing Operational Gates\n", 22 | "- Working with Gates and Activation Functions\n", 23 | "- Implementing a One Layer Neural Network\n", 24 | "- Implementing Different Layers\n", 25 | "- Using Multilayer Networks\n", 26 | "- Improving Predictions of Linear Models\n", 27 | "- Learning to Play Tic Tac Toe" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "Neural networks are currently breaking records in tasks such as image and speech recognition, reading handwriting, understanding text, image segmentation, dialogue systems, autonomous car driving, and so much more. While some of these tasks will be covered in later chapters, it is important to introduce neural networks as an easy-to-implement machine learning algorithm, so that we can expand on it later.\n", 35 | "\n", 36 | "The concept of a neural network has been around for decades. However, it has only recently gained traction because advances in processing power, algorithm efficiency, and data sizes now give us the computational power to train large networks.\n", 37 | "\n", 38 | "A neural network is basically a sequence of operations applied to a matrix of input data. These operations are usually collections of additions and multiplications followed by applications of non-linear functions. One example that we have already seen is logistic regression, from the last section of Chapter 3, Linear Regression. Logistic regression is the sum of the partial slope-feature products followed by the application of the sigmoid function, which is non-linear. Neural networks generalize this a bit more by allowing any combination of operations and non-linear functions, which includes the applications of absolute value, maximum, minimum, and so on.\n", 39 | "\n", 40 | "The important trick with neural networks is called 'back propagation'. Back propagation is a procedure that allows us to update the model variables based on the learning rate and the output of the loss function. We used back propagation to update our model variables in Chapter 3, Linear Regression, and in Chapter 4, Support Vector Machines.\n", 41 | "\n", 42 | "Another important feature to take note of in neural networks is the non-linear activation function. Since most neural networks are just combinations of addition and multiplication operations, they would not be able to model non-linear data sets on their own. To address this issue, we use non-linear activation functions in the network. This allows the neural network to adapt to most non-linear situations.\n", 43 | "\n", 44 | "It is important to remember that, like most of the algorithms we have seen so far, neural networks are sensitive to the hyper-parameters that we choose. In this chapter, we will see the impact of different learning rates, loss functions, and optimization procedures.\n", 45 | "\n", 46 | "There are many resources that cover neural networks in more depth and detail. Here are a few:\n", 47 | "\n", 48 | "- The seminal paper describing back propagation is Efficient Back Prop by Yann LeCun et al. 
The PDF is located here: http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf\n", 49 | "\n", 50 | "- CS231, Convolutional Neural Networks for Visual Recognition, by Stanford University, class resources available here: http://cs231n.stanford.edu/\n", 51 | "\n", 52 | "- CS224d, Deep Learning for Natural Language Processing, by Stanford University, class resources available here: http://cs224d.stanford.edu/\n", 53 | "\n", 54 | "- Deep Learning, a book by the MIT Press. Goodfellow, et. al. 2016. Located: http://www.deeplearningbook.org\n", 55 | "\n", 56 | "- There is an online book called Neural Networks and Deep Learning by Michael Nielsen, located here: http://neuralnetworksanddeeplearning.com/\n", 57 | "\n", 58 | "- For a more pragmatic approach and introduction to neural networks, Andrej Karpathy has written a great summary and JavaScript examples called A Hacker's Guide to Neural Networks. The write up is located here: http://karpathy.github.io/neuralnets/\n", 59 | "\n", 60 | "- Another site that summarizes some good notes on deep learning is called Deep Learning for Beginners by Ian Goodfellow, Yoshua Bengio, and Aaron Courville. This web page can be found here: http://randomekek.github.io/deep/deeplearning.html" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": { 67 | "collapsed": true 68 | }, 69 | "outputs": [], 70 | "source": [] 71 | } 72 | ], 73 | "metadata": { 74 | "kernelspec": { 75 | "display_name": "Python 3", 76 | "language": "python", 77 | "name": "python3" 78 | }, 79 | "language_info": { 80 | "codemirror_mode": { 81 | "name": "ipython", 82 | "version": 3 83 | }, 84 | "file_extension": ".py", 85 | "mimetype": "text/x-python", 86 | "name": "python", 87 | "nbconvert_exporter": "python", 88 | "pygments_lexer": "ipython3", 89 | "version": "3.6.1" 90 | } 91 | }, 92 | "nbformat": 4, 93 | "nbformat_minor": 0 94 | } 95 | -------------------------------------------------------------------------------- /06_Neural_Networks/02_Implementing_an_Operational_Gate/02_gates.py: -------------------------------------------------------------------------------- 1 | # Implementing Gates 2 | #---------------------------------- 3 | # 4 | # This function shows how to implement 5 | # various gates in TensorFlow 6 | # 7 | # One gate will be one operation with 8 | # a variable and a placeholder. 9 | # We will ask TensorFlow to change the 10 | # variable based on our loss function 11 | 12 | import tensorflow as tf 13 | from tensorflow.python.framework import ops 14 | ops.reset_default_graph() 15 | 16 | # Start Graph Session 17 | sess = tf.Session() 18 | 19 | #---------------------------------- 20 | # Create a multiplication gate: 21 | # f(x) = a * x 22 | # 23 | # a -- 24 | # | 25 | # |---- (multiply) --> output 26 | # x --| 27 | # 28 | 29 | a = tf.Variable(tf.constant(4.)) 30 | x_val = 5. 31 | x_data = tf.placeholder(dtype=tf.float32) 32 | 33 | multiplication = tf.multiply(a, x_data) 34 | 35 | # Declare the loss function as the difference between 36 | # the output and a target value, 50. 
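# Editor's sketch (plain Python, independent of the graph above, not part of
# the original recipe): for f(x) = a * x with squared loss L = (a*x - 50)^2,
# gradient descent updates a <- a - lr * dL/da, where dL/da = 2 * (a*x - 50) * x.
a_by_hand = 4.0   # same starting value as the variable a above
for _ in range(10):
    a_by_hand -= 0.01 * 2.0 * (a_by_hand * 5.0 - 50.0) * 5.0
# a_by_hand approaches 10.0 (since 10 * 5 = 50), which is the same
# trajectory the TensorFlow training loop below follows.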
37 | loss = tf.square(tf.subtract(multiplication, 50.)) 38 | 39 | # Initialize variables 40 | init = tf.global_variables_initializer() 41 | sess.run(init) 42 | 43 | # Declare optimizer 44 | my_opt = tf.train.GradientDescentOptimizer(0.01) 45 | train_step = my_opt.minimize(loss) 46 | 47 | # Run loop across gate 48 | print('Optimizing a Multiplication Gate Output to 50.') 49 | for _ in range(10): 50 | sess.run(train_step, feed_dict={x_data: x_val}) 51 | a_val = sess.run(a) 52 | mult_output = sess.run(multiplication, feed_dict={x_data: x_val}) 53 | print(str(a_val) + ' * ' + str(x_val) + ' = ' + str(mult_output)) 54 | 55 | ''' 56 | Create a nested gate: 57 | f(x) = a * x + b 58 | 59 | a -- 60 | | 61 | |-- (multiply)-- 62 | x --| | 63 | |-- (add) --> output 64 | b --| 65 | 66 | ''' 67 | 68 | # Start a New Graph Session 69 | ops.reset_default_graph() 70 | sess = tf.Session() 71 | 72 | a = tf.Variable(tf.constant(1.)) 73 | b = tf.Variable(tf.constant(1.)) 74 | x_val = 5. 75 | x_data = tf.placeholder(dtype=tf.float32) 76 | 77 | two_gate = tf.add(tf.multiply(a, x_data), b) 78 | 79 | # Declare the loss function as the difference between 80 | # the output and a target value, 50. 81 | loss = tf.square(tf.subtract(two_gate, 50.)) 82 | 83 | # Initialize variables 84 | init = tf.global_variables_initializer() 85 | sess.run(init) 86 | 87 | # Declare optimizer 88 | my_opt = tf.train.GradientDescentOptimizer(0.01) 89 | train_step = my_opt.minimize(loss) 90 | 91 | # Run loop across gate 92 | print('\nOptimizing Two Gate Output to 50.') 93 | for _ in range(10): 94 | sess.run(train_step, feed_dict={x_data: x_val}) 95 | a_val, b_val = (sess.run(a), sess.run(b)) 96 | two_gate_output = sess.run(two_gate, feed_dict={x_data: x_val}) 97 | print(str(a_val) + ' * ' + str(x_val) + ' + ' + str(b_val) + ' = ' + str(two_gate_output)) -------------------------------------------------------------------------------- /06_Neural_Networks/03_Working_with_Activation_Functions/03_activation_functions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Combining Gates and Activation Functions 3 | 4 | This function shows how to implement 5 | various gates with activation functions 6 | in TensorFlow 7 | 8 | This function is an extension of the 9 | prior gates, but with various activation 10 | functions. 11 | """ 12 | 13 | import tensorflow as tf 14 | import numpy as np 15 | import matplotlib.pyplot as plt 16 | from tensorflow.python.framework import ops 17 | ops.reset_default_graph() 18 | 19 | # Start Graph Session 20 | sess = tf.Session() 21 | tf.set_random_seed(5) 22 | np.random.seed(42) 23 | 24 | batch_size = 50 25 | 26 | a1 = tf.Variable(tf.random_normal(shape=[1, 1])) 27 | b1 = tf.Variable(tf.random_uniform(shape=[1, 1])) 28 | a2 = tf.Variable(tf.random_normal(shape=[1, 1])) 29 | b2 = tf.Variable(tf.random_uniform(shape=[1, 1])) 30 | x = np.random.normal(2, 0.1, 500) 31 | x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) 32 | 33 | sigmoid_activation = tf.sigmoid(tf.add(tf.matmul(x_data, a1), b1)) 34 | 35 | relu_activation = tf.nn.relu(tf.add(tf.matmul(x_data, a2), b2)) 36 | 37 | # Declare the loss function as the difference between 38 | # the output and a target value, 0.75. 
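# Editor's side note, a small standalone calculation (np is imported at the
# top of this script): the sigmoid saturates, so its gradient nearly vanishes
# for large |z|, while the ReLU gradient is a constant 1 wherever z > 0.
# This is why the ReLU gate below moves toward the 0.75 target much faster
# than the sigmoid gate.
z = np.array([-4.0, 0.0, 4.0])
sig = 1.0 / (1.0 + np.exp(-z))
sig_grad = sig * (1.0 - sig)            # approx [0.018, 0.25, 0.018]: tiny at the tails
relu_grad = (z > 0).astype(np.float64)  # [0., 0., 1.]: constant where active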
39 | loss1 = tf.reduce_mean(tf.square(tf.subtract(sigmoid_activation, 0.75))) 40 | loss2 = tf.reduce_mean(tf.square(tf.subtract(relu_activation, 0.75))) 41 | 42 | # Initialize variables 43 | init = tf.global_variables_initializer() 44 | sess.run(init) 45 | 46 | # Declare optimizer 47 | my_opt = tf.train.GradientDescentOptimizer(0.01) 48 | train_step_sigmoid = my_opt.minimize(loss1) 49 | train_step_relu = my_opt.minimize(loss2) 50 | 51 | # Run loop across gate 52 | print('\nOptimizing Sigmoid AND Relu Output to 0.75') 53 | loss_vec_sigmoid = [] 54 | loss_vec_relu = [] 55 | for i in range(500): 56 | rand_indices = np.random.choice(len(x), size=batch_size) 57 | x_vals = np.transpose([x[rand_indices]]) 58 | sess.run(train_step_sigmoid, feed_dict={x_data: x_vals}) 59 | sess.run(train_step_relu, feed_dict={x_data: x_vals}) 60 | 61 | loss_vec_sigmoid.append(sess.run(loss1, feed_dict={x_data: x_vals})) 62 | loss_vec_relu.append(sess.run(loss2, feed_dict={x_data: x_vals})) 63 | 64 | sigmoid_output = np.mean(sess.run(sigmoid_activation, feed_dict={x_data: x_vals})) 65 | relu_output = np.mean(sess.run(relu_activation, feed_dict={x_data: x_vals})) 66 | 67 | if i % 50 == 0: 68 | print('sigmoid = ' + str(np.mean(sigmoid_output)) + ' relu = ' + str(np.mean(relu_output))) 69 | 70 | # Plot the loss 71 | plt.plot(loss_vec_sigmoid, 'k-', label='Sigmoid Activation') 72 | plt.plot(loss_vec_relu, 'r--', label='Relu Activation') 73 | plt.ylim([0, 1.0]) 74 | plt.title('Loss per Generation') 75 | plt.xlabel('Generation') 76 | plt.ylabel('Loss') 77 | plt.legend(loc='upper right') 78 | plt.show() 79 | -------------------------------------------------------------------------------- /06_Neural_Networks/04_Single_Hidden_Layer_Network/04_single_hidden_layer_network.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implementing a one-layer Neural Network 3 | 4 | We will illustrate how to create a one hidden layer NN 5 | 6 | We will use the iris data for this exercise 7 | 8 | We will build a one-hidden layer neural network 9 | to predict the fourth attribute, Petal Width from 10 | the other three (Sepal length, Sepal width, Petal length). 
11 | """ 12 | 13 | import matplotlib.pyplot as plt 14 | import numpy as np 15 | import tensorflow as tf 16 | from sklearn import datasets 17 | from tensorflow.python.framework import ops 18 | ops.reset_default_graph() 19 | 20 | iris = datasets.load_iris() 21 | x_vals = np.array([x[0:3] for x in iris.data]) 22 | y_vals = np.array([x[3] for x in iris.data]) 23 | 24 | # Create graph session 25 | sess = tf.Session() 26 | 27 | # make results reproducible 28 | seed = 2 29 | tf.set_random_seed(seed) 30 | np.random.seed(seed) 31 | 32 | # Split data into train/test = 80%/20% 33 | train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False) 34 | test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices))) 35 | x_vals_train = x_vals[train_indices] 36 | x_vals_test = x_vals[test_indices] 37 | y_vals_train = y_vals[train_indices] 38 | y_vals_test = y_vals[test_indices] 39 | 40 | 41 | # Normalize by column (min-max norm) 42 | def normalize_cols(m): 43 | col_max = m.max(axis=0) 44 | col_min = m.min(axis=0) 45 | return (m-col_min) / (col_max - col_min) 46 | 47 | x_vals_train = np.nan_to_num(normalize_cols(x_vals_train)) 48 | x_vals_test = np.nan_to_num(normalize_cols(x_vals_test)) 49 | 50 | # Declare batch size 51 | batch_size = 50 52 | 53 | # Initialize placeholders 54 | x_data = tf.placeholder(shape=[None, 3], dtype=tf.float32) 55 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) 56 | 57 | # Create variables for both NN layers 58 | hidden_layer_nodes = 10 59 | A1 = tf.Variable(tf.random_normal(shape=[3, hidden_layer_nodes])) # inputs -> hidden nodes 60 | b1 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes])) # one biases for each hidden node 61 | A2 = tf.Variable(tf.random_normal(shape=[hidden_layer_nodes, 1])) # hidden inputs -> 1 output 62 | b2 = tf.Variable(tf.random_normal(shape=[1])) # 1 bias for the output 63 | 64 | # Declare model operations 65 | hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data, A1), b1)) 66 | final_output = tf.nn.relu(tf.add(tf.matmul(hidden_output, A2), b2)) 67 | 68 | # Declare loss function (MSE) 69 | loss = tf.reduce_mean(tf.square(y_target - final_output)) 70 | 71 | # Declare optimizer 72 | my_opt = tf.train.GradientDescentOptimizer(0.005) 73 | train_step = my_opt.minimize(loss) 74 | 75 | # Initialize variables 76 | init = tf.global_variables_initializer() 77 | sess.run(init) 78 | 79 | # Training loop 80 | loss_vec = [] 81 | test_loss = [] 82 | for i in range(500): 83 | rand_index = np.random.choice(len(x_vals_train), size=batch_size) 84 | rand_x = x_vals_train[rand_index] 85 | rand_y = np.transpose([y_vals_train[rand_index]]) 86 | sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) 87 | 88 | temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) 89 | loss_vec.append(np.sqrt(temp_loss)) 90 | 91 | test_temp_loss = sess.run(loss, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])}) 92 | test_loss.append(np.sqrt(test_temp_loss)) 93 | if (i + 1) % 50 == 0: 94 | print('Generation: ' + str(i+1) + '. 
Loss = ' + str(temp_loss)) 95 | 96 | # Plot loss (MSE) over time 97 | plt.plot(loss_vec, 'k-', label='Train Loss') 98 | plt.plot(test_loss, 'r--', label='Test Loss') 99 | plt.title('Loss (MSE) per Generation') 100 | plt.legend(loc='upper right') 101 | plt.xlabel('Generation') 102 | plt.ylabel('Loss') 103 | plt.show() 104 | -------------------------------------------------------------------------------- /06_Neural_Networks/08_Learning_Tic_Tac_Toe/base_tic_tac_toe_moves.csv: -------------------------------------------------------------------------------- 1 | 0,0,0,0,-1,0,0,0,0,0 2 | 0,-1,0,0,0,0,0,0,0,0 3 | 0,0,0,0,0,-1,0,0,0,6 4 | -1,0,0,0,0,0,0,0,0,4 5 | 0,0,0,0,0,0,1,-1,-1,3 6 | 0,-1,0,0,1,0,0,0,-1,0 7 | 0,-1,1,0,0,-1,0,0,0,7 8 | -1,0,0,0,-1,0,0,0,1,6 9 | 0,0,1,0,0,-1,-1,0,0,4 10 | 0,0,-1,0,0,0,0,-1,1,4 11 | 1,0,0,-1,0,0,0,-1,0,2 12 | 0,0,-1,0,1,0,-1,0,0,5 13 | -1,0,0,1,-1,-1,0,0,1,6 14 | -1,1,-1,0,1,0,0,1,0,8 15 | 0,0,0,-1,0,1,1,-1,-1,1 16 | -1,1,0,0,0,-1,0,-1,1,3 17 | 0,-1,1,0,1,-1,-1,0,0,8 18 | 0,0,-1,1,0,-1,0,-1,1,0 19 | 1,-1,0,0,-1,0,0,0,0,7 20 | 1,0,-1,0,-1,0,0,0,0,6 21 | 1,0,0,0,-1,0,-1,0,0,2 22 | 1,0,0,0,-1,-1,0,0,0,3 23 | 1,0,0,0,-1,0,0,0,-1,6 24 | 1,-1,0,-1,-1,0,0,1,0,5 25 | 1,-1,0,0,-1,0,-1,1,0,2 26 | 1,-1,-1,0,-1,0,0,1,0,6 27 | 1,-1,0,0,-1,-1,0,1,0,3 28 | 1,0,-1,-1,-1,0,1,0,0,8 29 | 1,-1,1,0,-1,0,-1,0,0,7 30 | 1,0,0,1,-1,-1,-1,0,0,2 31 | 1,0,0,-1,-1,0,1,0,-1,5 32 | -------------------------------------------------------------------------------- /06_Neural_Networks/images/02_operational_gates.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/02_operational_gates.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/03_activation1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/03_activation1.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/03_activation2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/03_activation2.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/04_nn_layout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/04_nn_layout.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/04_nn_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/04_nn_loss.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/06_nn_multiple_layers_loss.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/06_nn_multiple_layers_loss.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/07_lin_reg_acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/07_lin_reg_acc.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/07_lin_reg_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/07_lin_reg_loss.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/08_tic_tac_toe_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/08_tic_tac_toe_architecture.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/08_tictactoe_layout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/08_tictactoe_layout.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/08_tictactoe_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/08_tictactoe_loss.png -------------------------------------------------------------------------------- /06_Neural_Networks/images/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/06_Neural_Networks/images/image.png -------------------------------------------------------------------------------- /07_Natural_Language_Processing/01_Introduction/01_introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Natural Language Processing (NLP) Introduction\n", 8 | "-------------------\n", 9 | "\n", 10 | "In this chapter we cover the following topics:\n", 11 | " - Working with Bag of Words\n", 12 | " - Implementing TF-IDF\n", 13 | " - Working with Skip-gram Embeddings\n", 14 | " - Working with CBOW Embeddings\n", 15 | " - Making Predictions with Word2vec\n", 16 | " - Using Doc2vec for Sentiment Analysis" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "Up to this point, we have only considered machine learning algorithms that mostly operate on numerical inputs. If we want to use text, we must find a way to convert the text into numbers. 
There are many ways to do this, and we will explore a few common ways it is achieved.\n", 24 | "\n", 25 | "If we consider the sentence **“tensorflow makes machine learning easy”**, we could convert the words to numbers in the order that we observe them. This would make the sentence become “1 2 3 4 5”. Then when we see a new sentence, **“machine learning is easy”**, we can translate this as “3 4 0 5”, denoting words we haven’t seen before with an index of zero. With these two examples, we have limited our vocabulary to 6 numbers. With large texts we can choose how many words we want to keep, and usually keep the most frequent words, labeling everything else with the index of zero.\n", 26 | "\n", 27 | "If the word “learning” has a numerical value of 4, and the word “makes” has a numerical value of 2, then it would be natural to assume that “learning” is twice “makes”. Since we do not want this type of numerical relationship between words, we assume these numbers represent categories and not relational numbers.\n", 28 | "Another problem is that the two sentences are of different sizes. Each observation we make (a sentence, in this case) needs to be the same-size input to the model we wish to create. To get around this, we convert each sentence into a sparse vector that has a value of one at a specific index if the word for that index occurs in the sentence." 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "metadata": {}, 34 | "source": [ 35 | "| word --> | tensorflow | makes | machine | learning | easy |\n", 36 | "|:----:|:-----:|:-----:|:-----:|:-----:|:-----:|\n", 37 | "| word index --> | 1 | 2 | 3 | 4 | 5 |" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "The occurrence vector would then be:\n", 45 | "\n", 46 | "    sentence1 = [0, 1, 1, 1, 1, 1]\n", 47 | "\n", 48 | "This is a vector of length 6 because we have 5 words in our vocabulary and we reserve the 0-th index for unknown or rare words.\n", 49 | "\n", 50 | "Now consider the sentence, **'machine learning is easy'**." 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "| word --> | machine | learning | is | easy |\n", 58 | "|:----:|:-----:|:-----:|:-----:|:-----:|\n", 59 | "| word index --> | 3 | 4 | 0 | 5 |" 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "metadata": {}, 65 | "source": [ 66 | "The occurrence vector for this sentence is now:\n", 67 | "\n", 68 | "    sentence2 = [1, 0, 0, 1, 1, 1]\n", 69 | "\n", 70 | "Notice that we now have a procedure that converts any sentence into a fixed-length numerical vector." 71 | ] 72 | },
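{ "cell_type": "markdown", "metadata": {}, "source": [ "To make the procedure concrete, here is a minimal sketch in plain Python (our own illustration; the TensorFlow implementation follows in the next section):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Sketch of the occurrence-vector procedure described above\n", "vocab = {'tensorflow': 1, 'makes': 2, 'machine': 3, 'learning': 4, 'easy': 5}\n", "\n", "def occurrence_vector(sentence, vocab):\n", "    vec = [0] * (len(vocab) + 1)  # index 0 is reserved for unknown/rare words\n", "    for word in sentence.split():\n", "        vec[vocab.get(word, 0)] = 1\n", "    return vec\n", "\n", "print(occurrence_vector('tensorflow makes machine learning easy', vocab))  # [0, 1, 1, 1, 1, 1]\n", "print(occurrence_vector('machine learning is easy', vocab))  # [1, 0, 0, 1, 1, 1]" ] },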
73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "A disadvantage of this method is that we lose any indication of word order. The two sentences “tensorflow makes machine learning easy” and “machine learning makes tensorflow easy” would result in the same sentence vector.\n", 78 | "It is also worthwhile to note that the length of these vectors is equal to the size of the vocabulary we pick. \n", 79 | "It is common to pick a very large vocabulary, so these sentence vectors can be very sparse. This type of embedding, which we have covered in this introduction, is called “bag of words”. We will implement it in the next section.\n", 80 | "\n", 81 | "Another drawback is that the entries for the words “is” and “tensorflow\" both take the same value of one in these vectors. We can imagine that the word “is” might be less important than the occurrence of the word “tensorflow\".\n", 82 | "We will explore different types of embeddings in this chapter that attempt to address these ideas, but first we start with an implementation of bag of words." 83 | ] 84 | } 85 | ], 86 | "metadata": { 87 | "kernelspec": { 88 | "display_name": "Python 3", 89 | "language": "python", 90 | "name": "python3" 91 | }, 92 | "language_info": { 93 | "codemirror_mode": { 94 | "name": "ipython", 95 | "version": 3.0 96 | }, 97 | "file_extension": ".py", 98 | "mimetype": "text/x-python", 99 | "name": "python", 100 | "nbconvert_exporter": "python", 101 | "pygments_lexer": "ipython3", 102 | "version": "3.5.2" 103 | } 104 | }, 105 | "nbformat": 4, 106 | "nbformat_minor": 0 107 | } -------------------------------------------------------------------------------- /07_Natural_Language_Processing/images/02_bag_of_words.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/07_Natural_Language_Processing/images/02_bag_of_words.png -------------------------------------------------------------------------------- /07_Natural_Language_Processing/images/03_tfidf_acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/07_Natural_Language_Processing/images/03_tfidf_acc.png -------------------------------------------------------------------------------- /07_Natural_Language_Processing/images/03_tfidf_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/07_Natural_Language_Processing/images/03_tfidf_loss.png -------------------------------------------------------------------------------- /07_Natural_Language_Processing/images/04_skipgram_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/07_Natural_Language_Processing/images/04_skipgram_model.png -------------------------------------------------------------------------------- /07_Natural_Language_Processing/images/05_cbow_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/07_Natural_Language_Processing/images/05_cbow_model.png -------------------------------------------------------------------------------- /07_Natural_Language_Processing/images/06_word2vec_acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/07_Natural_Language_Processing/images/06_word2vec_acc.png -------------------------------------------------------------------------------- /07_Natural_Language_Processing/images/06_word2vec_loss.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/07_Natural_Language_Processing/images/06_word2vec_loss.png -------------------------------------------------------------------------------- /07_Natural_Language_Processing/images/07_sentiment_doc2vec_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/07_Natural_Language_Processing/images/07_sentiment_doc2vec_loss.png -------------------------------------------------------------------------------- /07_Natural_Language_Processing/images/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/07_Natural_Language_Processing/images/image.png -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/04_Retraining_Current_Architectures/04_download_cifar10.py: -------------------------------------------------------------------------------- 1 | # Download/Saving CIFAR-10 images in Inception format 2 | #--------------------------------------- 3 | # 4 | # In this script, we download the CIFAR-10 images and 5 | # transform/save them in the Inception Retraining Format 6 | # 7 | # The end purpose of the files is for re-training the 8 | # Google Inception tensorflow model to work on the CIFAR-10. 9 | 10 | import os 11 | import tarfile 12 | import _pickle as cPickle 13 | import numpy as np 14 | import urllib.request 15 | import scipy.misc 16 | from tensorflow.python.framework import ops 17 | ops.reset_default_graph() 18 | 19 | cifar_link = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' 20 | data_dir = 'temp' 21 | if not os.path.isdir(data_dir): 22 | os.makedirs(data_dir) 23 | 24 | # Download tar file 25 | target_file = os.path.join(data_dir, 'cifar-10-python.tar.gz') 26 | if not os.path.isfile(target_file): 27 | print('CIFAR-10 file not found. 
Downloading CIFAR data (Size = 163MB)') 28 | print('This may take a few minutes, please wait.') 29 | filename, headers = urllib.request.urlretrieve(cifar_link, target_file) 30 | 31 | # Extract the tar file into the data directory 32 | tar = tarfile.open(target_file) 33 | tar.extractall(path=data_dir) 34 | tar.close() 35 | objects = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] 36 | 37 | # Create train image folders 38 | train_folder = 'train_dir' 39 | if not os.path.isdir(os.path.join(data_dir, train_folder)): 40 | for i in range(10): 41 | folder = os.path.join(data_dir, train_folder, objects[i]) 42 | os.makedirs(folder) 43 | # Create test image folders 44 | test_folder = 'validation_dir' 45 | if not os.path.isdir(os.path.join(data_dir, test_folder)): 46 | for i in range(10): 47 | folder = os.path.join(data_dir, test_folder, objects[i]) 48 | os.makedirs(folder) 49 | 50 | # Extract images accordingly 51 | data_location = os.path.join(data_dir, 'cifar-10-batches-py') 52 | train_names = ['data_batch_' + str(x) for x in range(1, 6)] 53 | test_names = ['test_batch'] 54 | 55 | 56 | def load_batch_from_file(file): 57 | file_conn = open(file, 'rb') 58 | image_dictionary = cPickle.load(file_conn, encoding='latin1') 59 | file_conn.close() 60 | return image_dictionary 61 | 62 | 63 | def save_images_from_dict(image_dict, folder='data_dir'): 64 | # image_dict.keys() = 'labels', 'filenames', 'data', 'batch_label' 65 | for ix, label in enumerate(image_dict['labels']): 66 | folder_path = os.path.join(data_dir, folder, objects[label]) 67 | filename = image_dict['filenames'][ix] 68 | # Transform image data 69 | image_array = image_dict['data'][ix] 70 | image_array.resize([3, 32, 32]) 71 | # Save image 72 | output_location = os.path.join(folder_path, filename) 73 | scipy.misc.imsave(output_location, image_array.transpose()) 74 | 75 | # Sort train images 76 | for file in train_names: 77 | print('Saving images from file: {}'.format(file)) 78 | file_location = os.path.join(data_dir, 'cifar-10-batches-py', file) 79 | image_dict = load_batch_from_file(file_location) 80 | save_images_from_dict(image_dict, folder=train_folder) 81 | 82 | # Sort test images 83 | for file in test_names: 84 | print('Saving images from file: {}'.format(file)) 85 | file_location = os.path.join(data_dir, 'cifar-10-batches-py', file) 86 | image_dict = load_batch_from_file(file_location) 87 | save_images_from_dict(image_dict, folder=test_folder) 88 | 89 | # Create labels file 90 | cifar_labels_file = os.path.join(data_dir, 'cifar10_labels.txt') 91 | print('Writing labels file, {}'.format(cifar_labels_file)) 92 | with open(cifar_labels_file, 'w') as labels_file: 93 | for item in objects: 94 | labels_file.write("{}\n".format(item)) 95 | 96 | # After this is done, we proceed with the TensorFlow fine-tuning tutorial. 
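# (A hypothetical invocation, since the script location has moved between TF versions: the folders created above would be passed to the retraining script as, e.g., python tensorflow/examples/image_retraining/retrain.py --image_dir=temp/train_dir) 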
97 | 98 | # https://www.tensorflow.org/tutorials/image_retraining 99 | -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/06_Deepdream/book_cover.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/06_Deepdream/book_cover.jpg -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/01_intro_cnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/01_intro_cnn.png -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/01_intro_cnn2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/01_intro_cnn2.png -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/02_cnn1_loss_acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/02_cnn1_loss_acc.png -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/02_cnn1_mnist_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/02_cnn1_mnist_output.png -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/03_cnn2_loss_acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/03_cnn2_loss_acc.png -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/05_stylenet_ex.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/05_stylenet_ex.png -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/06_deepdream_ex.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/06_deepdream_ex.png -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/book_cover.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/book_cover.jpg -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/image.png -------------------------------------------------------------------------------- /08_Convolutional_Neural_Networks/images/starry_night.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/08_Convolutional_Neural_Networks/images/starry_night.jpg -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/01_Introduction/readme.md: -------------------------------------------------------------------------------- 1 | # Introduction To RNNs in TensorFlow 2 | 3 | Placeholder for future purposes 4 | -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/images/01_RNN_Seq2Seq.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/01_RNN_Seq2Seq.png -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/images/01_RNN_Single_Target.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/01_RNN_Single_Target.png -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/images/02_RNN_Spam_Acc_Loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/02_RNN_Spam_Acc_Loss.png -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/images/03_LSTM_Loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/03_LSTM_Loss.png -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/images/04_MultipleLSTM_Loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/04_MultipleLSTM_Loss.png -------------------------------------------------------------------------------- 
/09_Recurrent_Neural_Networks/images/04_MultipleRNN_Architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/04_MultipleRNN_Architecture.png -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/images/05_Seq2Seq_Loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/05_Seq2Seq_Loss.png -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/images/06_Similarity_RNN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/06_Similarity_RNN.png -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/images/06_Similarity_RNN_Architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/06_Similarity_RNN_Architecture.png -------------------------------------------------------------------------------- /09_Recurrent_Neural_Networks/images/06_Similarity_RNN_Diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/09_Recurrent_Neural_Networks/images/06_Similarity_RNN_Diagram.png -------------------------------------------------------------------------------- /10_Taking_TensorFlow_to_Production/02_Using_Multiple_Devices/02_using_multiple_devices.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Using Multiple Devices 3 | #---------------------------------- 4 | # 5 | # This function gives us the ways to use 6 | # multiple devices (executors) in TensorFlow. 7 | 8 | import tensorflow as tf 9 | from tensorflow.python.framework import ops 10 | ops.reset_default_graph() 11 | 12 | # To find out where placement occurs, set 'log_device_placement' 13 | sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 14 | 15 | a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a') 16 | b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b') 17 | c = tf.matmul(a, b) 18 | 19 | # Runs the op. 20 | print(sess.run(c)) 21 | 22 | 23 | # If we load a graph and want device placement to be forgotten, 24 | # we set a parameter in our session: 25 | config = tf.ConfigProto() 26 | config.allow_soft_placement = True 27 | sess_soft = tf.Session(config=config) 28 | 29 | # GPUs 30 | #--------------------------------- 31 | # Note that the GPU must have a compute capability > 3.5 for TF to use. 32 | # http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capability 33 | 34 | 35 | # Careful with GPU memory allocation, TF never releases it. 
TF starts with almost 36 | # all of the GPU memory allocated. We can slowly grow to that limit with an 37 | # option setting: 38 | 39 | config.gpu_options.allow_growth = True 40 | sess_grow = tf.Session(config=config) 41 | 42 | # Also, we can limit the size of GPU memory used, with the following option 43 | config.gpu_options.per_process_gpu_memory_fraction = 0.4 44 | sess_limited = tf.Session(config=config) 45 | 46 | 47 | # How to set placements on multiple devices. 48 | # Here, assume we have three devices: CPU:0, GPU:1, and GPU:2 49 | if tf.test.is_built_with_cuda(): 50 | with tf.device('/cpu:0'): 51 | a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3]) 52 | b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1]) 53 | 54 | with tf.device('/gpu:1'): 55 | c = tf.matmul(a, b) 56 | c = tf.reshape(c, [-1]) 57 | 58 | with tf.device('/gpu:2'): 59 | d = tf.matmul(b, a) 60 | flat_d = tf.reshape(d, [-1]) 61 | 62 | combined = tf.multiply(c, flat_d) 63 | print(sess.run(combined)) 64 | -------------------------------------------------------------------------------- /10_Taking_TensorFlow_to_Production/03_Parallelizing_TensorFlow/03_parallelizing_tensorflow.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Parallelizing TensorFlow 3 | #---------------------------------- 4 | # 5 | # We will show how to use TensorFlow distributed 6 | 7 | import tensorflow as tf 8 | 9 | # We will setup a local cluster (on localhost) 10 | 11 | # Cluster for 2 local workers (tasks 0 and 1): 12 | cluster = tf.train.ClusterSpec({'local': ['localhost:2222', 'localhost:2223']}) 13 | # Server definitions (both servers start; the variable keeps the last one): 14 | server = tf.train.Server(cluster, job_name="local", task_index=0) 15 | server = tf.train.Server(cluster, job_name="local", task_index=1) 16 | # To block and keep a server process alive, we could finish with: 17 | # server.join() 18 | 19 | # Have each worker do a task 20 | # Worker 0 : create matrices 21 | # Worker 1 : calculate sum of all elements 22 | mat_dim = 25 23 | matrix_list = {} 24 | 25 | with tf.device('/job:local/task:0'): 26 | for i in range(0, 2): 27 | m_label = 'm_{}'.format(i) 28 | matrix_list[m_label] = tf.random_normal([mat_dim, mat_dim]) 29 | 30 | # Have worker 1 calculate the sum of each matrix 31 | sum_outs = {} 32 | with tf.device('/job:local/task:1'): 33 | for i in range(0, 2): 34 | A = matrix_list['m_{}'.format(i)] 35 | sum_outs['m_{}'.format(i)] = tf.reduce_sum(A) 36 | 37 | # Add the individual matrix sums together 38 | summed_out = tf.add_n(list(sum_outs.values())) 39 | 40 | with tf.Session(server.target) as sess: 41 | result = sess.run(summed_out) 42 | print('Summed Values: {}'.format(result)) 43 | -------------------------------------------------------------------------------- /10_Taking_TensorFlow_to_Production/04_Production_Tips/04_production_tips_for_tf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Tips for TensorFlow to Production 3 | #---------------------------------- 4 | # 5 | # Various Tips for Taking TensorFlow to Production 6 | 7 | ############################################ 8 | # 9 | # THIS SCRIPT IS NOT RUNNABLE. 
10 | # -it only contains tips for production code 11 | # 12 | ############################################ 13 | 14 | # You can also clear the default graph from memory 15 | import tensorflow as tf 16 | from tensorflow.python.framework import ops 17 | ops.reset_default_graph() 18 | 19 | # Saving Models 20 | # File types created from saving: 21 | # checkpoint file: Holds info on where the most recent models are 22 | # events file: Strictly for viewing graph in Tensorboard 23 | # pbtxt file: Textual protobufs file (uncompressed), used for debugging 24 | # chkp file: Holds data and model weights (large file) 25 | # meta chkp files: Model Graph and Meta-data (learning rate and operations) 26 | 27 | 28 | # Saving data pipeline structures (vocabulary, etc.) 29 | word_list = ['to', 'be', 'or', 'not', 'to', 'be'] 30 | vocab_list = list(set(word_list)) 31 | vocab2ix_dict = dict(zip(vocab_list, range(len(vocab_list)))) 32 | ix2vocab_dict = {val: key for key, val in vocab2ix_dict.items()} 33 | 34 | # Save vocabulary 35 | import json 36 | with open('vocab2ix_dict.json', 'w') as file_conn: 37 | json.dump(vocab2ix_dict, file_conn) 38 | 39 | # Load vocabulary 40 | with open('vocab2ix_dict.json', 'r') as file_conn: 41 | vocab2ix_dict = json.load(file_conn) 42 | 43 | # After model declaration, add a saving operation 44 | saver = tf.train.Saver() 45 | # Then during training, save every so often, referencing the training generation 46 | for i in range(generations): 47 | ... 48 | if i % save_every == 0: 49 | saver.save(sess, 'my_model', global_step=i) 50 | 51 | # Can also save only specific variables: 52 | saver = tf.train.Saver({"my_var": my_variable}) 53 | 54 | 55 | # Other options for the saver are 'keep_checkpoint_every_n_hours' 56 | # and 'max_to_keep' (default is 5). 57 | 58 | # Be sure to name operations and variables for easy loading and referencing later 59 | conv_weights = tf.Variable(tf.random_normal(), name='conv_weights') 60 | loss = tf.reduce_mean(... , name='loss') 61 | 62 | # Instead of trying argparse and main(), TensorFlow provides an 'app' function 63 | # to handle running and loading of arguments 64 | 65 | # At the beginning of the file, define the flags. 66 | tf.flags.DEFINE_string("worker_locations", "", "List of worker addresses.") 67 | tf.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.') 68 | tf.flags.DEFINE_integer('generations', 1000, 'Number of training generations.') 69 | tf.flags.DEFINE_boolean('run_unit_tests', False, 'If true, run tests.') 70 | FLAGS = tf.flags.FLAGS 71 | 72 | # Need to define a 'main' function for the app to run 73 | def main(_): 74 | worker_ips = FLAGS.worker_locations.split(",") 75 | learning_rate = FLAGS.learning_rate 76 | generations = FLAGS.generations 77 | run_unit_tests = FLAGS.run_unit_tests 78 | 79 | # Run the TensorFlow app 80 | if __name__ == "__main__": 81 | # The following looks for a "main()" function to run, passing along any flags. 82 | tf.app.run() 83 | # Can modify this to be more custom (note: pass the function itself, do not call it): 84 | tf.app.run(main=my_main_function, argv=my_arguments) 85 | 86 | 87 | # Use of TensorFlow's built in logging: 88 | # Five levels: DEBUG, INFO, WARN, ERROR, and FATAL 89 | tf.logging.set_verbosity(tf.logging.WARN) 90 | # WARN is the default value, but to see more information, you can set it to 91 | # INFO or DEBUG 92 | tf.logging.set_verbosity(tf.logging.DEBUG) 93 | # Note: 'DEBUG' is quite verbose. 
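# Restoring a saved model later (a minimal sketch, assuming the saver above 
# produced a checkpoint such as 'my_model-1000'): 
# saver = tf.train.import_meta_graph('my_model-1000.meta') 
# saver.restore(sess, 'my_model-1000') 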
94 | 95 | -------------------------------------------------------------------------------- /10_Taking_TensorFlow_to_Production/05_Production_Example/05_production_ex_eval.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # TensorFlow Production Example (Evaluating) 3 | #---------------------------------- 4 | # 5 | # We pull together everything and create an example 6 | # using the best TensorFlow production tips 7 | # 8 | # The example we will productionalize is the spam/ham RNN 9 | # from the RNN Chapter. 10 | 11 | import os 12 | import re 13 | import numpy as np 14 | import tensorflow as tf 15 | from tensorflow.python.framework import ops 16 | ops.reset_default_graph() 17 | 18 | tf.flags.DEFINE_string("storage_folder", "temp", "Where to store model and data.") 19 | tf.flags.DEFINE_string('model_file', None, 'Model file location.') 20 | tf.flags.DEFINE_bool('run_unit_tests', False, 'If true, run tests.') 21 | FLAGS = tf.flags.FLAGS 22 | 23 | 24 | # Create a text cleaning function 25 | def clean_text(text_string): 26 | text_string = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string) 27 | text_string = " ".join(text_string.split()) 28 | text_string = text_string.lower() 29 | return text_string 30 | 31 | 32 | # Load vocab processor 33 | def load_vocab(): 34 | vocab_path = os.path.join(FLAGS.storage_folder, "vocab") 35 | vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(vocab_path) 36 | return vocab_processor 37 | 38 | 39 | # Process input data: 40 | def process_data(input_data, vocab_processor): 41 | input_data = clean_text(input_data) 42 | input_data = input_data.split() 43 | processed_input = np.array(list(vocab_processor.transform(input_data))) 44 | return processed_input 45 | 46 | 47 | # Get input function 48 | def get_input_data(): 49 | """ 50 | For this function, we just prompt the user for a text message to evaluate, 51 | but it could also potentially read in a file. 52 | """ 53 | input_text = input("Please enter a text message to evaluate: ") 54 | vocab_processor = load_vocab() 55 | return process_data(input_text, vocab_processor) 56 | 57 | 58 | # Test clean_text function 59 | class clean_test(tf.test.TestCase): 60 | # Make sure cleaning function behaves correctly (the method name must start with 'test' for tf.test.main() to discover it) 61 | def test_clean_string(self): 62 | with self.test_session(): 63 | test_input = '--TensorFlow\'s so Great! Don\t you think so? 
' 64 | test_expected = 'tensorflows so great don you think so' 65 | test_out = clean_text(test_input) 66 | self.assertEqual(test_expected, test_out) 67 | 68 | 69 | # Main function 70 | def main(args): 71 | # Get flags 72 | storage_folder = FLAGS.storage_folder 73 | 74 | # Get user input text 75 | x_data = get_input_data() 76 | 77 | # Load model 78 | graph = tf.Graph() 79 | with graph.as_default(): 80 | sess = tf.Session() 81 | with sess.as_default(): 82 | # Load the saved meta graph and restore variables 83 | saver = tf.train.import_meta_graph("{}.meta".format(os.path.join(storage_folder, "model.ckpt"))) 84 | saver.restore(sess, os.path.join(storage_folder, "model.ckpt")) 85 | 86 | # Get the placeholders from the graph by name 87 | x_data_ph = graph.get_operation_by_name("x_data_ph").outputs[0] 88 | dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0] 89 | probability_outputs = graph.get_operation_by_name("probability_outputs").outputs[0] 90 | 91 | # Make the prediction 92 | eval_feed_dict = {x_data_ph: x_data, dropout_keep_prob: 1.0} 93 | probability_prediction = sess.run(tf.reduce_mean(probability_outputs, 0), eval_feed_dict) 94 | 95 | # Print output (Or save to file or DB connection?) 96 | print('Probability of Spam: {:.4}'.format(probability_prediction[1])) 97 | 98 | 99 | # Run main module/tf App 100 | if __name__ == "__main__": 101 | if FLAGS.run_unit_tests: 102 | # Perform unit tests 103 | tf.test.main() 104 | else: 105 | # Run evaluation 106 | tf.app.run() -------------------------------------------------------------------------------- /10_Taking_TensorFlow_to_Production/06_Using_TensorFlow_Serving/06_Using_TensorFlow_Serving_Client.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Using TensorFlow Serving (CLIENT) 3 | #---------------------------------- 4 | # 5 | # We show how to use "TensorFlow Serving", a model serving api from TensorFlow to serve a model. 6 | # 7 | # Pre-requisites: 8 | # - Visit https://www.tensorflow.org/serving/setup 9 | # and follow all the instructions on setting up TensorFlow Serving (including installing Bazel). 
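# - We also assume the model server is already running; a hypothetical invocation 
#   (adjust the export path to your setup) would be: 
#   tensorflow_model_server --port=9000 --model_name=spam_ham --model_base_path=/tmp/spam_ham 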
10 | # 11 | # In this example, we query the TensorFlow Serving API we have running on port 9000 12 | 13 | import os 14 | import re 15 | import grpc 16 | import numpy as np 17 | import tensorflow as tf 18 | 19 | from tensorflow_serving.apis import predict_pb2 20 | from tensorflow_serving.apis import prediction_service_pb2 21 | 22 | tf.flags.DEFINE_string('server', '0.0.0.0', 'PredictionService host') 23 | tf.flags.DEFINE_string('port', '9000', 'PredictionService port') 24 | tf.flags.DEFINE_string('data_dir', 'temp', 'Folder where vocabulary is.') 25 | FLAGS = tf.flags.FLAGS 26 | 27 | 28 | # Define functions to process texts into arrays of indices 29 | # Create a text cleaning function 30 | def clean_text(text_string): 31 | text_string = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string) 32 | text_string = " ".join(text_string.split()) 33 | text_string = text_string.lower() 34 | return text_string 35 | 36 | 37 | # Load vocab processor 38 | def load_vocab(): 39 | vocab_path = os.path.join(FLAGS.data_dir, 'vocab') 40 | vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(vocab_path) 41 | return vocab_processor 42 | 43 | 44 | # Process input data: 45 | def process_data(input_data): 46 | vocab_processor = load_vocab() 47 | input_data = [clean_text(x) for x in input_data] 48 | processed_input = np.array(list(vocab_processor.transform(input_data))) 49 | return processed_input 50 | 51 | 52 | def get_results(data, server, port): 53 | channel = grpc.insecure_channel(':'.join([server, port])) 54 | stub = prediction_service_pb2.PredictionServiceStub(channel) 55 | processed_data = process_data(data) 56 | 57 | results = [] 58 | for input_x in processed_data: 59 | request = predict_pb2.PredictRequest() 60 | request.model_spec.name = 'spam_ham' 61 | request.model_spec.signature_name = 'predict_spam' # Change to predict spam 62 | request.inputs['texts'].CopyFrom(tf.contrib.util.make_tensor_proto(input_x, shape=[4, 20])) # 'texts' 63 | prediction_future = stub.Predict.future(request) 64 | prediction = prediction_future.result().outputs['scores'] 65 | # prediction = np.array(prediction_future.result().outputs['scores'].float_val) 66 | results.append(prediction) 67 | return results 68 | 69 | 70 | def main(data): 71 | if not FLAGS.server: 72 | print('please specify server host:port') 73 | return 74 | results = get_results(data, FLAGS.server, FLAGS.port) 75 | 76 | for input_text, output_pred in zip(data, results): 77 | print('Input text: {}, Prediction: {}'.format(input_text, output_pred)) 78 | 79 | 80 | if __name__ == '__main__': 81 | # Get sample data, here you may feel free to change this to a file, cloud-address, user input, etc... 
82 | test_data = ['Please respond ASAP to claim your prize !', 83 | 'Hey, are you coming over for dinner tonight?', 84 | 'Text 444 now to see the top users in your area', 85 | 'drive safe, and thanks for visiting again!'] 86 | 87 | tf.app.run(argv=test_data) 88 | -------------------------------------------------------------------------------- /10_Taking_TensorFlow_to_Production/images/file_structure.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/10_Taking_TensorFlow_to_Production/images/file_structure.jpg -------------------------------------------------------------------------------- /10_Taking_TensorFlow_to_Production/images/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/10_Taking_TensorFlow_to_Production/images/image.png -------------------------------------------------------------------------------- /11_More_with_TensorFlow/01_Visualizing_Computational_Graphs/01_using_tensorboard.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Using Tensorboard 3 | #---------------------------------- 4 | # 5 | # We illustrate the various ways to use 6 | # Tensorboard 7 | 8 | import os 9 | import io 10 | import time 11 | import pathlib 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | import tensorflow as tf 15 | from tensorflow.python.framework import ops 16 | ops.reset_default_graph() 17 | 18 | # Initialize a graph session 19 | sess = tf.Session() 20 | 21 | # Create tensorboard folder if not exists 22 | if not os.path.exists('tensorboard'): 23 | os.makedirs('tensorboard') 24 | print('Running a slowed down linear regression. ' 25 | 'Run the command: $tensorboard --logdir="tensorboard" ' 26 | ' Then navigate to http://127.0.0.1:6006') 27 | 28 | # You can also specify a port option with --port 6006 29 | 30 | # Create a visualizer object 31 | summary_writer = tf.summary.FileWriter('tensorboard', sess.graph) 32 | 33 | # Wait a few seconds for user to run tensorboard commands 34 | time.sleep(3) 35 | 36 | # Some parameters 37 | batch_size = 50 38 | generations = 100 39 | 40 | # Create sample input data 41 | x_data = np.arange(1000)/10. 42 | true_slope = 2. 
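# (Sanity note: y_data below is 2x plus Gaussian noise, so a successful run should recover a slope estimate near 2.) 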
43 | y_data = x_data * true_slope + np.random.normal(loc=0.0, scale=25, size=1000) 44 | 45 | # Split into train/test 46 | train_ix = np.random.choice(len(x_data), size=int(len(x_data)*0.9), replace=False) 47 | test_ix = np.setdiff1d(np.arange(1000), train_ix) 48 | x_data_train, y_data_train = x_data[train_ix], y_data[train_ix] 49 | x_data_test, y_data_test = x_data[test_ix], y_data[test_ix] 50 | 51 | # Declare placeholders 52 | x_graph_input = tf.placeholder(tf.float32, [None]) 53 | y_graph_input = tf.placeholder(tf.float32, [None]) 54 | 55 | # Declare model variables 56 | m = tf.Variable(tf.random_normal([1], dtype=tf.float32), name='Slope') 57 | 58 | # Declare model 59 | output = tf.multiply(m, x_graph_input, name='Batch_Multiplication') 60 | 61 | # Declare loss function (L1) 62 | residuals = output - y_graph_input 63 | l1_loss = tf.reduce_mean(tf.abs(residuals), name="L1_Loss") 64 | 65 | # Declare optimization function 66 | my_optim = tf.train.GradientDescentOptimizer(0.01) 67 | train_step = my_optim.minimize(l1_loss) 68 | 69 | # Visualize a scalar 70 | with tf.name_scope('Slope_Estimate'): 71 | tf.summary.scalar('Slope_Estimate', tf.squeeze(m)) 72 | 73 | # Visualize a histogram (errors) 74 | with tf.name_scope('Loss_and_Residuals'): 75 | tf.summary.histogram('Histogram_Errors', tf.squeeze(l1_loss)) 76 | tf.summary.histogram('Histogram_Residuals', tf.squeeze(residuals)) 77 | 78 | 79 | # Declare summary merging operation 80 | summary_op = tf.summary.merge_all() 81 | 82 | # Initialize Variables 83 | init = tf.global_variables_initializer() 84 | sess.run(init) 85 | 86 | for i in range(generations): 87 | batch_indices = np.random.choice(len(x_data_train), size=batch_size) 88 | x_batch = x_data_train[batch_indices] 89 | y_batch = y_data_train[batch_indices] 90 | _, train_loss, summary = sess.run([train_step, l1_loss, summary_op], 91 | feed_dict={x_graph_input: x_batch, 92 | y_graph_input: y_batch}) 93 | 94 | test_loss, test_resids = sess.run([l1_loss, residuals], feed_dict={x_graph_input: x_data_test, 95 | y_graph_input: y_data_test}) 96 | 97 | if (i + 1) % 10 == 0: 98 | print('Generation {} of {}. Train Loss: {:.3}, Test Loss: {:.3}.'.format(i+1, generations, train_loss, test_loss)) 99 | 100 | # Reuse the summary writer created above rather than opening a new FileWriter each time 101 | summary_writer.add_summary(summary, i) 102 | time.sleep(0.5) 103 | 104 | # Create a function that saves the current linear fit plot to an in-memory PNG byte buffer 105 | def gen_linear_plot(slope): 106 | linear_prediction = x_data * slope 107 | plt.plot(x_data, y_data, 'b.', label='data') 108 | plt.plot(x_data, linear_prediction, 'r-', linewidth=3, label='predicted line') 109 | plt.legend(loc='upper left') 110 | buf = io.BytesIO() 111 | plt.savefig(buf, format='png') 112 | buf.seek(0) 113 | return buf 114 | 115 | # Add image to tensorboard (plot the linear fit!) 
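# (How the image logging below works: we render the fit with matplotlib into an in-memory PNG buffer, decode that buffer back into an image tensor with tf.image.decode_png, add a batch dimension, and log it via tf.summary.image.) 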
116 | slope = sess.run(m) 117 | plot_buf = gen_linear_plot(slope[0]) 118 | # Convert PNG buffer to TF image 119 | image = tf.image.decode_png(plot_buf.getvalue(), channels=4) 120 | # Add the batch dimension 121 | image = tf.expand_dims(image, 0) 122 | # Add image summary 123 | image_summary_op = tf.summary.image("Linear_Plot", image) 124 | image_summary = sess.run(image_summary_op) 125 | summary_writer.add_summary(image_summary, i) 126 | summary_writer.close() 127 | -------------------------------------------------------------------------------- /11_More_with_TensorFlow/02_Working_with_a_Genetic_Algorithm/02_genetic_algorithm.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Implementing a Genetic Algorithm 3 | # ------------------------------- 4 | # 5 | # Genetic Algorithm Optimization in TensorFlow 6 | # 7 | # We are going to implement a genetic algorithm 8 | # to optimize to a ground truth array. The ground 9 | # truth will be an array of 50 floating point 10 | # numbers which are generated by: 11 | # f(x)=sin(2*pi*x/50) where 0 <= x <= 50 [...] 101 | mutation_values[mutation_values >= mutation] = 0 102 | 103 | # Run GA step 104 | feed_dict = {truth_ph: truth.reshape([1, features]), 105 | crossover_mat_ph: crossover_mat, 106 | mutation_val_ph: mutation_values} 107 | step.run(feed_dict, session=sess) 108 | best_individual_val = sess.run(best_individual, feed_dict=feed_dict) 109 | 110 | if i % 5 == 0: 111 | best_fit = sess.run(best_val, feed_dict=feed_dict) 112 | print('Generation: {}, Best Fitness (lowest MSE): {:.2}'.format(i, -best_fit)) 113 | 114 | plt.plot(truth, label="True Values") 115 | plt.plot(np.squeeze(best_individual_val), label="Best Individual") 116 | plt.axis((0, features, -1.25, 1.25)) 117 | plt.legend(loc='upper right') 118 | plt.show() 119 | -------------------------------------------------------------------------------- /11_More_with_TensorFlow/03_Clustering_Using_KMeans/03_k_means.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # K-means with TensorFlow 3 | #---------------------------------- 4 | # 5 | # This script shows how to do k-means with TensorFlow 6 | 7 | import numpy as np 8 | import matplotlib.pyplot as plt 9 | import tensorflow as tf 10 | from sklearn import datasets 11 | from scipy.spatial import cKDTree 12 | from sklearn.decomposition import PCA 13 | from sklearn.preprocessing import scale 14 | from tensorflow.python.framework import ops 15 | ops.reset_default_graph() 16 | 17 | sess = tf.Session() 18 | 19 | iris = datasets.load_iris() 20 | 21 | num_pts = len(iris.data) 22 | num_feats = len(iris.data[0]) 23 | 24 | # Set k-means parameters 25 | # There are 3 types of iris flowers, see if we can predict them 26 | k = 3 27 | generations = 25 28 | 29 | data_points = tf.Variable(iris.data) 30 | cluster_labels = tf.Variable(tf.zeros([num_pts], dtype=tf.int64)) 31 | 32 | # Randomly choose starting points 33 | rand_starts = np.array([iris.data[np.random.choice(len(iris.data))] for _ in range(k)]) 34 | 35 | centroids = tf.Variable(rand_starts) 36 | 37 | # In order to calculate the distance between every data point and every centroid, we 38 | # repeat the centroids into a (num_points) by k matrix. 
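# (Shape check, for intuition: with the iris data, num_pts=150, k=3, and num_feats=4, both centroid_matrix and point_matrix below end up with shape [150, 3, 4], so the squared differences can be summed over the feature axis.) 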
39 | centroid_matrix = tf.reshape(tf.tile(centroids, [num_pts, 1]), [num_pts, k, num_feats]) 40 | # Then we reshape the data points into k (3) repeats 41 | point_matrix = tf.reshape(tf.tile(data_points, [1, k]), [num_pts, k, num_feats]) 42 | distances = tf.reduce_sum(tf.square(point_matrix - centroid_matrix), axis=2) 43 | 44 | # Find the group it belongs to with tf.argmin() 45 | centroid_group = tf.argmin(distances, 1) 46 | 47 | 48 | # Find the group average 49 | def data_group_avg(group_ids, data): 50 | # Sum each group 51 | sum_total = tf.unsorted_segment_sum(data, group_ids, 3) 52 | # Count each group 53 | num_total = tf.unsorted_segment_sum(tf.ones_like(data), group_ids, 3) 54 | # Calculate average 55 | avg_by_group = sum_total/num_total 56 | return avg_by_group 57 | 58 | 59 | means = data_group_avg(centroid_group, data_points) 60 | 61 | update = tf.group(centroids.assign(means), cluster_labels.assign(centroid_group)) 62 | 63 | init = tf.global_variables_initializer() 64 | 65 | sess.run(init) 66 | 67 | for i in range(generations): 68 | print('Calculating gen {}, out of {}.'.format(i, generations)) 69 | _, centroid_group_count = sess.run([update, centroid_group]) 70 | group_count = [] 71 | for ix in range(k): 72 | group_count.append(np.sum(centroid_group_count==ix)) 73 | print('Group counts: {}'.format(group_count)) 74 | 75 | 76 | [centers, assignments] = sess.run([centroids, cluster_labels]) 77 | 78 | 79 | # Find which group assignments correspond to which group labels 80 | # First, need a most common element function 81 | def most_common(my_list): 82 | return max(set(my_list), key=my_list.count) 83 | 84 | 85 | label0 = most_common(list(assignments[0:50])) 86 | label1 = most_common(list(assignments[50:100])) 87 | label2 = most_common(list(assignments[100:150])) 88 | 89 | group0_count = np.sum(assignments[0:50] == label0) 90 | group1_count = np.sum(assignments[50:100] == label1) 91 | group2_count = np.sum(assignments[100:150] == label2) 92 | 93 | accuracy = (group0_count + group1_count + group2_count)/150. 94 | 95 | print('Accuracy: {:.2}'.format(accuracy)) 96 | 97 | # Also plot the output 98 | # First use PCA to transform the 4-dimensional data into 2-dimensions 99 | pca_model = PCA(n_components=2) 100 | reduced_data = pca_model.fit_transform(iris.data) 101 | # Transform centers 102 | reduced_centers = pca_model.transform(centers) 103 | 104 | # Step size of mesh for plotting 105 | h = .02 106 | 107 | # Plot the decision boundary. 
For that, we will assign a color to each grid point 108 | x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1 109 | y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1 110 | xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) 111 | 112 | # Get k-means classifications for the grid points 113 | xx_pt = list(xx.ravel()) 114 | yy_pt = list(yy.ravel()) 115 | xy_pts = np.array([[x, y] for x, y in zip(xx_pt, yy_pt)]) 116 | mytree = cKDTree(reduced_centers) 117 | dist, indexes = mytree.query(xy_pts) 118 | 119 | # Put the result into a color plot 120 | indexes = indexes.reshape(xx.shape) 121 | plt.figure(1) 122 | plt.clf() 123 | plt.imshow(indexes, interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap=plt.cm.Paired, 124 | aspect='auto', origin='lower') 125 | 126 | # Plot each of the true iris data groups 127 | symbols = ['o', '^', 'D'] 128 | label_name = ['Setosa', 'Versicolour', 'Virginica'] 129 | for i in range(3): 130 | temp_group = reduced_data[(i*50):(50)*(i+1)] 131 | plt.plot(temp_group[:, 0], temp_group[:, 1], symbols[i], markersize=10, label=label_name[i]) 132 | # Plot the centroids as a white X 133 | plt.scatter(reduced_centers[:, 0], reduced_centers[:, 1], marker='x', s=169, linewidths=3, color='w', zorder=10) 134 | plt.title('K-means clustering on Iris Dataset\n(centroids are marked with a white cross)') 135 | plt.xlim(x_min, x_max) 136 | plt.ylim(y_min, y_max) 137 | plt.legend(loc='lower right') 138 | plt.show() 139 | -------------------------------------------------------------------------------- /11_More_with_TensorFlow/04_Solving_A_System_of_ODEs/04_solving_ode_system.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Solving a System of ODEs 3 | #---------------------------------- 4 | # 5 | # In this script, we use TensorFlow to solve a system 6 | # of ODEs. 7 | # 8 | # The system of ODEs we will solve is the Lotka-Volterra 9 | # predator-prey system. 
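# (A note on the scheme, as we read the update rules below: they are a plain 
# forward-Euler discretization, X(t+1) = X(t) + X'(t) * t_delta, so t_delta 
# should stay small for the simulated trajectory to remain stable.) 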
10 | 11 | 12 | # Declaring Operations 13 | import matplotlib.pyplot as plt 14 | import tensorflow as tf 15 | from tensorflow.python.framework import ops 16 | ops.reset_default_graph() 17 | 18 | # Open graph session 19 | sess = tf.Session() 20 | 21 | # Discrete Lotka-Volterra predator/prey equations 22 | # 23 | # X(t+1) = X(t) + (aX(t) + bX(t)Y(t)) * t_delta # Prey 24 | # 25 | # Y(t+1) = Y(t) + (cY(t) + dX(t)Y(t)) * t_delta # Predator 26 | 27 | # Declare constants and variables 28 | x_initial = tf.constant(1.0) 29 | y_initial = tf.constant(1.0) 30 | X_t1 = tf.Variable(x_initial) 31 | Y_t1 = tf.Variable(y_initial) 32 | 33 | # Make the placeholders 34 | t_delta = tf.placeholder(tf.float32, shape=()) 35 | a = tf.placeholder(tf.float32, shape=()) 36 | b = tf.placeholder(tf.float32, shape=()) 37 | c = tf.placeholder(tf.float32, shape=()) 38 | d = tf.placeholder(tf.float32, shape=()) 39 | 40 | # Discretized ODE update 41 | X_t2 = X_t1 + (a * X_t1 + b * X_t1 * Y_t1) * t_delta 42 | Y_t2 = Y_t1 + (c * Y_t1 + d * X_t1 * Y_t1) * t_delta 43 | 44 | # Update to New Population 45 | step = tf.group( 46 | X_t1.assign(X_t2), 47 | Y_t1.assign(Y_t2)) 48 | 49 | init = tf.global_variables_initializer() 50 | sess.run(init) 51 | 52 | # Run the ODE 53 | prey_values = [] 54 | predator_values = [] 55 | for i in range(1000): 56 | # Step simulation (using constants for a known cyclic solution) 57 | step.run({a: (2./3.), b: (-4./3.), c: -1.0, d: 1.0, t_delta: 0.01}, session=sess) 58 | # Store each outcome 59 | temp_prey, temp_pred = sess.run([X_t1, Y_t1]) 60 | prey_values.append(temp_prey) 61 | predator_values.append(temp_pred) 62 | 63 | # Visualize the output 64 | plt.plot(prey_values) 65 | plt.plot(predator_values) 66 | plt.legend(['Prey', 'Predator'], loc='upper right') 67 | plt.show() 68 | -------------------------------------------------------------------------------- /11_More_with_TensorFlow/05_Using_a_Random_Forest/05_Using_a_Random_Forest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Using a Random Forest 3 | --------------------- 4 | 5 | This script will illustrate how to use TensorFlow's boosted trees (gradient boosted forest) estimators. 6 | 7 | 8 | For illustrative purposes we will show how to do this with the Boston housing data. 9 | 10 | Attribute Information: 11 | 12 | 1. CRIM per capita crime rate by town 13 | 2. ZN proportion of residential land zoned for lots over 14 | 25,000 sq.ft. 15 | 3. INDUS proportion of non-retail business acres per town 16 | 4. CHAS Charles River dummy variable (= 1 if tract bounds 17 | river; 0 otherwise) 18 | 5. NOX nitric oxides concentration (parts per 10 million) 19 | 6. RM average number of rooms per dwelling 20 | 7. AGE proportion of owner-occupied units built prior to 1940 21 | 8. DIS weighted distances to five Boston employment centres 22 | 9. RAD index of accessibility to radial highways 23 | 10. TAX full-value property-tax rate per $10,000 24 | 11. PTRATIO pupil-teacher ratio by town 25 | 12. B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks 26 | by town 27 | 13. LSTAT % lower status of the population 28 | 14. y_target Median value of owner-occupied homes in $1000's. 29 | """ 30 | 31 | import os 32 | import numpy as np 33 | import tensorflow as tf 34 | from keras.datasets import boston_housing 35 | from tensorflow.python.framework import ops 36 | ops.reset_default_graph() 37 | 38 | # For using the boosted trees classifier (binary classification) in TF: 39 | # Note: target labels have to be 0 and 1. 
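# (For example, one hypothetical way to binarize the continuous Boston target 
# for the classifier: labels = (y_train > np.median(y_train)).astype(int)) 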
40 | boosted_classifier = tf.estimator.BoostedTreesClassifier 41 | 42 | # For using a boosted trees regressor (continuous targets) in TF: 43 | regression_classifier = tf.estimator.BoostedTreesRegressor 44 | 45 | # Load data 46 | (x_train, y_train), (x_test, y_test) = boston_housing.load_data() 47 | 48 | # Set model parameters 49 | # Batch size 50 | batch_size = 32 51 | # Number of training steps 52 | train_steps = 500 53 | # Number of trees in our 'forest' 54 | n_trees = 100 55 | # Maximum depth of any tree in forest 56 | max_depth = 6 57 | 58 | # Data ETL 59 | binary_split_cols = ['CHAS', 'RAD'] 60 | col_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT'] 61 | X_dtrain = {col: x_train[:, ix] for ix, col in enumerate(col_names)} 62 | X_dtest = {col: x_test[:, ix] for ix, col in enumerate(col_names)} 63 | 64 | # Create feature columns! 65 | feature_cols = [] 66 | for ix, column in enumerate(x_train.T): 67 | col_name = col_names[ix] 68 | 69 | # Create binary split feature 70 | if col_name in binary_split_cols: 71 | # To create 2 buckets, need 1 boundary - the mean 72 | bucket_boundaries = [column.mean()] 73 | numeric_feature = tf.feature_column.numeric_column(col_name) 74 | final_feature = tf.feature_column.bucketized_column(source_column=numeric_feature, boundaries=bucket_boundaries) 75 | # Create bucketed feature 76 | else: 77 | # To create 5 buckets, need 4 boundaries 78 | bucket_boundaries = list(np.linspace(column.min() * 1.1, column.max() * 0.9, 4)) 79 | numeric_feature = tf.feature_column.numeric_column(col_name) 80 | final_feature = tf.feature_column.bucketized_column(source_column=numeric_feature, boundaries=bucket_boundaries) 81 | 82 | # Add feature to feature_col list 83 | feature_cols.append(final_feature) 84 | 85 | 86 | # Create an input function 87 | input_fun = tf.estimator.inputs.numpy_input_fn(X_dtrain, y=y_train, batch_size=batch_size, num_epochs=10, shuffle=True) 88 | 89 | # Training 90 | model = regression_classifier(feature_columns=feature_cols, 91 | n_trees=n_trees, 92 | max_depth=max_depth, 93 | learning_rate=0.25, 94 | n_batches_per_layer=batch_size) 95 | model.train(input_fn=input_fun, steps=train_steps) 96 | 97 | # Evaluation on test set 98 | # Do not shuffle when predicting 99 | p_input_fun = tf.estimator.inputs.numpy_input_fn(X_dtest, y=y_test, batch_size=batch_size, num_epochs=1, shuffle=False) 100 | # Get predictions 101 | predictions = list(model.predict(input_fn=p_input_fun)) 102 | final_preds = [pred['predictions'][0] for pred in predictions] 103 | 104 | # Get error metric (mean absolute relative error) 105 | mae = np.mean([np.abs((actual - predicted) / predicted) for actual, predicted in zip(y_test, final_preds)]) 106 | print('Mean Abs Err on test set: {}'.format(mae)) 107 | -------------------------------------------------------------------------------- /11_More_with_TensorFlow/06_Using_TensorFlow_with_Keras/06_Using_TensorFlow_with_Keras.py: -------------------------------------------------------------------------------- 1 | # Using TensorFlow with Keras 2 | #---------------------------------- 3 | # 4 | # This script will show you how to create model layers with Keras 5 | # 6 | 7 | import tensorflow as tf 8 | from sklearn.preprocessing import MultiLabelBinarizer 9 | from keras.utils import to_categorical 10 | from tensorflow import keras 11 | from tensorflow.python.framework import ops 12 | ops.reset_default_graph() 13 | 14 | # Load MNIST data 15 | from tensorflow.examples.tutorials.mnist import 
input_data 16 | 17 | # The following loads the MNIST dataset into 18 | # 19 | # mnist.[train/test].[images/labels] 20 | # 21 | # where images are a 1x784 flat array and labels are integers between 0 and 9. 22 | # 23 | 24 | mnist = input_data.read_data_sets("MNIST_data/") 25 | x_train = mnist.train.images 26 | x_test = mnist.test.images 27 | y_train = mnist.train.labels 28 | y_train = [[i] for i in y_train] 29 | y_test = mnist.test.labels 30 | y_test = [[i] for i in y_test] 31 | 32 | # One-hot encode labels 33 | one_hot = MultiLabelBinarizer() 34 | y_train = one_hot.fit_transform(y_train) 35 | y_test = one_hot.transform(y_test) 36 | 37 | # Example 1: Fully connected neural network model 38 | # We start with a 'sequential' model type (connecting layers together) 39 | model = keras.Sequential() 40 | # Adds a densely-connected layer with 32 units to the model, followed by a ReLU activation. 41 | model.add(keras.layers.Dense(32, activation='relu')) 42 | # Adds a densely-connected layer with 16 units to the model, followed by a ReLU activation. 43 | model.add(keras.layers.Dense(16, activation='relu')) 44 | # Add a softmax layer with 10 output units: 45 | model.add(keras.layers.Dense(10, activation='softmax')) 46 | 47 | # Compile the model: 48 | model.compile(optimizer=tf.train.AdamOptimizer(0.001), 49 | loss='categorical_crossentropy', 50 | metrics=['accuracy']) 51 | 52 | # Configure a model for mean-squared error regression. 53 | # model.compile(optimizer=tf.train.AdamOptimizer(0.01), 54 | # loss='mse', # mean squared error 55 | # metrics=['mae']) # mean absolute error 56 | 57 | # Configure a model for categorical classification. 58 | # model.compile(optimizer=tf.train.RMSPropOptimizer(0.01), 59 | # loss=keras.losses.categorical_crossentropy, 60 | # metrics=[keras.metrics.categorical_accuracy]) 61 | 62 | # Fit the model: 63 | model.fit(x_train, 64 | y_train, 65 | epochs=5, 66 | batch_size=64, 67 | validation_data=(x_test, y_test)) 68 | 69 | 70 | # --------------------- 71 | # Simple CNN in Keras: 72 | # --------------------- 73 | # First we transform the input images from 1D arrays to 2D matrices. 
# ---------------------
# Simple CNN in Keras:
# ---------------------
# First we transform the input images from flat 1D arrays into 2D matrices (28 x 28).
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
num_classes = 10

# Categorize y targets
y_test = to_categorical(mnist.test.labels)
y_train = to_categorical(mnist.train.labels)

# Decrease test size for memory usage
x_test = x_test[:64]
y_test = y_test[:64]

# Start our sequential model
cnn_model = keras.Sequential()
cnn_model.add(keras.layers.Conv2D(25,
                                  kernel_size=(4, 4),
                                  strides=(1, 1),
                                  activation='relu',
                                  input_shape=input_shape))
cnn_model.add(keras.layers.MaxPooling2D(pool_size=(2, 2),
                                        strides=(2, 2)))

cnn_model.add(keras.layers.Conv2D(50,
                                  kernel_size=(5, 5),
                                  strides=(1, 1),
                                  activation='relu'))

cnn_model.add(keras.layers.MaxPooling2D(pool_size=(2, 2),
                                        strides=(2, 2)))

cnn_model.add(keras.layers.Flatten())

cnn_model.add(keras.layers.Dense(num_classes, activation='softmax'))

cnn_model.compile(optimizer=tf.train.AdamOptimizer(0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])


# Callback that records training accuracy at the end of each epoch
class AccuracyHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.acc = []

    def on_epoch_end(self, epoch, logs={}):
        self.acc.append(logs.get('acc'))


history = AccuracyHistory()

cnn_model.fit(x_train,
              y_train,
              batch_size=64,
              epochs=3,
              verbose=1,
              validation_data=(x_test, y_test),
              callbacks=[history])

print(history.acc)
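The layer output shapes of this CNN work out as follows (Keras Conv2D and MaxPooling2D default to 'valid' padding); calling cnn_model.summary() once the model is built should print the same numbers:

# input          : 28 x 28 x 1
# conv 4x4 (25)  : 25 x 25 x 25   (28 - 4 + 1 = 25)
# maxpool 2x2    : 12 x 12 x 25   (25 // 2 = 12)
# conv 5x5 (50)  :  8 x  8 x 50   (12 - 5 + 1 = 8)
# maxpool 2x2    :  4 x  4 x 50
# flatten        : 4 * 4 * 50 = 800 units -> dense softmax over 10 classes
cnn_model.summary()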
--------------------------------------------------------------------------------
/11_More_with_TensorFlow/images/01_tensorboard1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/11_More_with_TensorFlow/images/01_tensorboard1.png
--------------------------------------------------------------------------------
/11_More_with_TensorFlow/images/01_tensorboard2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/11_More_with_TensorFlow/images/01_tensorboard2.png
--------------------------------------------------------------------------------
/11_More_with_TensorFlow/images/01_tensorboard3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/11_More_with_TensorFlow/images/01_tensorboard3.png
--------------------------------------------------------------------------------
/11_More_with_TensorFlow/images/02_genetic_algorithm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/11_More_with_TensorFlow/images/02_genetic_algorithm.png
--------------------------------------------------------------------------------
/11_More_with_TensorFlow/images/03_kmeans.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/11_More_with_TensorFlow/images/03_kmeans.png
--------------------------------------------------------------------------------
/11_More_with_TensorFlow/images/04_ode_system.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-2-Machine-Learning-Cookbook/c73b6aeef19e0b53cfd84029afc89711aded861b/11_More_with_TensorFlow/images/04_ode_system.png
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018 Packt

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# TensorFlow-2-Machine-Learning-Cookbook
TensorFlow 2 Machine Learning Cookbook, published by Packt
--------------------------------------------------------------------------------