├── .gitignore ├── .gitmodules ├── Chapter01 └── ch-01_Overview_of_TensorFlow_and_Machine_Learning.ipynb ├── Chapter02 ├── README.md ├── ch-02_Detecting_Explonaets_in_Outer_Space.ipynb ├── decision_tree.dia ├── decision_tree.png └── tfbt-table.png ├── Chapter03 ├── README.md ├── Run_On_Browser.html ├── TensorflowJS │ ├── README.md │ ├── Run_On_Browser.html │ ├── group1-shard1of1 │ ├── group2-shard1of1 │ ├── group3-shard1of1 │ ├── group4-shard1of1 │ ├── group5-shard1of1 │ ├── main.py │ ├── model.json │ ├── model_functions.py │ ├── preprocess_functions.py │ ├── requirements.txt │ ├── sentiment.txt │ └── token_index.csv ├── group1-shard1of1 ├── group2-shard1of1 ├── group3-shard1of1 ├── group4-shard1of1 ├── group5-shard1of1 ├── main.py ├── model.json ├── model_functions.py ├── preprocess_functions.py ├── requirements.txt ├── sentiment.txt └── token_index.csv ├── Chapter04 ├── README.md ├── Tensorflow_Lite │ ├── README.md │ ├── main.py │ ├── parameters.py │ ├── requirements.txt │ └── utils.py ├── main.py ├── parameters.py ├── requirements.txt └── utils.py ├── Chapter06 ├── Data │ ├── GE.csv │ ├── GOOG.csv │ └── NFLX.csv ├── GP.py ├── GaussianProcess │ ├── Data │ │ ├── GE.csv │ │ ├── GOOG.csv │ │ └── NFLX.csv │ ├── GP.py │ ├── Images │ │ ├── GE_1992_2018_adj_closing_prices.png │ │ ├── GE_2008_2016_adj_closing_prices.png │ │ ├── GE_2008_2016_prices_normalized.png │ │ ├── GE_2008_2018_adj_closing_prices.png │ │ ├── GE_2017_predicted.png │ │ ├── GE_2018_predicted.png │ │ ├── GOOG_2004_2018_adj_closing_prices.png │ │ ├── GOOG_2008_2016_adj_closing_prices.png │ │ ├── GOOG_2008_2016_prices_normalized.png │ │ ├── GOOG_2008_2018_adj_closing_prices.png │ │ ├── GOOG_2017_predicted.png │ │ ├── GOOG_2018_predicted.png │ │ ├── NFLX_2002_2018_adj_closing_prices.png │ │ ├── NFLX_2008_2016_adj_closing_prices.png │ │ ├── NFLX_2008_2016_prices_normalized.png │ │ ├── NFLX_2008_2018_adj_closing_prices.png │ │ ├── NFLX_2017_predicted.png │ │ └── NFLX_2018_predicted.png │ ├── PreProcessing.py │ ├── README.md │ ├── VisualizeData.py │ ├── main.py │ └── requirements.txt ├── Images │ ├── GE_1992_2018_adj_closing_prices.png │ ├── GE_2008_2016_adj_closing_prices.png │ ├── GE_2008_2016_prices_normalized.png │ ├── GE_2008_2018_adj_closing_prices.png │ ├── GE_2017_predicted.png │ ├── GE_2018_predicted.png │ ├── GOOG_2004_2018_adj_closing_prices.png │ ├── GOOG_2008_2016_adj_closing_prices.png │ ├── GOOG_2008_2016_prices_normalized.png │ ├── GOOG_2008_2018_adj_closing_prices.png │ ├── GOOG_2017_predicted.png │ ├── GOOG_2018_predicted.png │ ├── NFLX_2002_2018_adj_closing_prices.png │ ├── NFLX_2008_2016_adj_closing_prices.png │ ├── NFLX_2008_2016_prices_normalized.png │ ├── NFLX_2008_2018_adj_closing_prices.png │ ├── NFLX_2017_predicted.png │ └── NFLX_2018_predicted.png ├── PreProcessing.py ├── README.md ├── VisualizeData.py ├── main.py └── requirements.txt ├── Chapter07 ├── README.md ├── data │ └── Download_Data_Zip_File_Here.txt ├── main.py ├── model.py ├── parameters.py ├── plots │ ├── .DS_Store │ ├── Confusion_Matrix_with_threshold_3.89949698999.png │ ├── Loss_Curves.png │ ├── Precision_Threshold_Curve.png │ ├── Recall_Threshold_Curve.png │ ├── Recon_Error_with_Fraud_Transactions.png │ └── Recon_Error_with_Normal_Transactions.png ├── requirements.txt └── utils.py ├── Chapter08 ├── Code │ ├── README.md │ ├── bnn.py │ ├── parameters.py │ ├── requirements.txt │ └── utils.py ├── Data │ └── Download_Data_Here.txt └── README.md ├── Chapter09 ├── DiscoGAN.py ├── README.md ├── main.py ├── parameters.py ├── requirements.txt └── 
utils.py ├── Chapter10 ├── CapsNet.py ├── README.md ├── data │ └── fashion-mnist │ │ ├── t10k-images-idx3-ubyte │ │ ├── t10k-labels-idx1-ubyte │ │ ├── train-images-idx3-ubyte │ │ └── train-labels-idx1-ubyte ├── helper_functions.py ├── imgs │ ├── img1.png │ ├── img2.png │ ├── img3.png │ └── img4.png ├── main.py ├── parameter_config.py ├── requirements.txt └── results │ ├── decoder_images.png │ ├── input_images.png │ ├── test.csv │ ├── train.csv │ └── validation.csv ├── Chapter11 ├── ch-11.ipynb ├── model.png └── netflix.png ├── Chapter12 ├── Images.zip ├── data │ ├── buses │ │ ├── Community-Transit-15808-Double-Tall-in-Downtown-Seattle.JPG │ │ ├── Torrens-Transit-Australian-Bus-Manufacturing--CB62A--bodied-.jpg │ │ ├── bus-2554946-960-720.jpg │ │ ├── image-12.jpeg │ │ ├── image-13.jpeg │ │ ├── image-14.jpeg │ │ ├── image-15.jpeg │ │ ├── image-16.jpeg │ │ ├── image-17.jpeg │ │ ├── image-18.jpeg │ │ ├── image-19.jpeg │ │ ├── image-2.jpeg │ │ ├── image-20.jpeg │ │ ├── image-21.jpeg │ │ ├── image-22.jpeg │ │ ├── image-23.jpeg │ │ ├── image-24.jpeg │ │ ├── image-26.jpeg │ │ ├── image-28.jpeg │ │ ├── image-29.jpeg │ │ ├── image-30.jpeg │ │ ├── image-31.jpeg │ │ ├── image-32.jpeg │ │ ├── image-4.jpeg │ │ ├── image-5.jpeg │ │ ├── image-9.jpeg │ │ ├── images-10.jpeg │ │ ├── images-15.jpeg │ │ ├── images-16.jpeg │ │ ├── images-18.jpeg │ │ ├── images-19.jpeg │ │ ├── images-2.jpeg │ │ ├── images-20.jpeg │ │ ├── images-21.jpeg │ │ ├── images-23.jpeg │ │ ├── images-24.jpeg │ │ ├── images-25.jpeg │ │ ├── images-26.jpeg │ │ ├── images-27.jpeg │ │ ├── images-28.jpeg │ │ ├── images-29.jpeg │ │ ├── images-30.jpeg │ │ ├── images-4.jpeg │ │ ├── images-5.jpeg │ │ ├── images-7.jpeg │ │ ├── images-8.jpeg │ │ ├── images-9.jpeg │ │ ├── images.jpeg │ │ └── london-2665352-960-720.jpg │ └── cars │ │ ├── image-10.jpeg │ │ ├── image-11.jpeg │ │ ├── image-12.jpeg │ │ ├── image-13.jpeg │ │ ├── image-14.jpeg │ │ ├── image-15.jpeg │ │ ├── image-16.jpeg │ │ ├── image-17.jpeg │ │ ├── image-19.jpeg │ │ ├── image-2.jpeg │ │ ├── image-20.jpeg │ │ ├── image-21.jpeg │ │ ├── image-22.jpeg │ │ ├── image-23.jpeg │ │ ├── image-24.jpeg │ │ ├── image-25.jpeg │ │ ├── image-27.jpeg │ │ ├── image-28.jpeg │ │ ├── image-29.jpeg │ │ ├── image-3.jpeg │ │ ├── image-4.jpeg │ │ ├── image-5.jpeg │ │ ├── image-7.jpeg │ │ ├── image-8.jpeg │ │ ├── image-9.jpeg │ │ ├── images-13.jpeg │ │ ├── images-14.jpeg │ │ ├── images-15.jpeg │ │ ├── images-19.jpeg │ │ ├── images-2.jpeg │ │ ├── images-20.jpeg │ │ ├── images-21.jpeg │ │ ├── images-23.jpeg │ │ ├── images-26.jpeg │ │ ├── images-27.jpeg │ │ ├── images-28.jpeg │ │ ├── images-3.jpeg │ │ ├── images-4.jpeg │ │ ├── images-5.jpeg │ │ ├── images-7.jpeg │ │ └── images-8.jpeg ├── distributed.py ├── mnist_TFoS.py ├── object_detection.py ├── readme.md └── tensorflow_distributed_dl.py ├── Chapter13 ├── README.md ├── data │ └── postgre_book.txt ├── main.py ├── model.py ├── parameters.py ├── requirements.txt └── utils.py ├── Chapter14 ├── ch-14_Reinforcement_Learning.ipynb └── pacman-1.png ├── LICENSE └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints 2 | /Chapter02/tfbtmodel/ 3 | /Chapter02/tfmodel/ 4 | .ipynb_checkpoints/ 5 | /.ipynb_checkpoints/ 6 | /.idea/ 7 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "datasetslib"] 2 | path = datasetslib 3 | url = 
git@github.com:armando-fandango/datasetslib.git 4 | -------------------------------------------------------------------------------- /Chapter02/README.md: -------------------------------------------------------------------------------- 1 | Link for the data used: https://www.kaggle.com/keplersmachines/kepler-labelled-time-series-data/home 2 | -------------------------------------------------------------------------------- /Chapter02/decision_tree.dia: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter02/decision_tree.dia -------------------------------------------------------------------------------- /Chapter02/decision_tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter02/decision_tree.png -------------------------------------------------------------------------------- /Chapter02/tfbt-table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter02/tfbt-table.png -------------------------------------------------------------------------------- /Chapter03/README.md: -------------------------------------------------------------------------------- 1 | # Sentiment Analysis Using TensorFlow.js 2 | This folder contains the code for Chapter 3 of the book. It creates a sentiment analysis model using the movie reviews dataset mentioned below. 3 | 4 | ## Data 5 | * https://www.kaggle.com/c/si650winter11/data 6 | 7 | ### Installations 8 | * This code was tested using native Python 3.6 with Anaconda 9 | * Create a conda virtual environment and install the relevant packages using the requirements.txt file 10 | ``` 11 | pip install -r requirements.txt 12 | ``` 13 | ### Python Code Run Instructions 14 | Navigate to this directory in a terminal and execute the following command in the virtual environment 15 | ``` 16 | python main.py 17 | ``` 18 | This will train and save the model 19 | 20 | ### Run HTML File 21 | Open the file Run_On_Browser.html in Chrome to run the model in the browser. Note that you may need to configure your server to allow [Cross-Origin Resource Sharing (CORS)](https://enable-cors.org/) in order to allow fetching the files in JavaScript. An easy way to get CORS working is to install the [Chrome CORS Extension](https://chrome.google.com/webstore/detail/allow-control-allow-origi/nlfbmbojpeacfghkpbjhddihlkkiljbi?hl=en). 22 | 23 | Once CORS is enabled you need to start a Python HTTP server to serve the model files to the page. This can be achieved by executing the following command in the directory containing model.json and the other model files (a CORS-enabled variant of this server is sketched after the list of typical inputs below). 24 | ``` 25 | python -m SimpleHTTPServer 26 | ``` 27 | Once the server is started you can go to the browser and access the HTML file. Note that when you first load the page it will take 1-2 seconds to load the model and the token index file. 28 | Once the system is ready, type in a review and click submit. It should print the entire tensor of scores at the top (apologies for the formatting). 29 | 30 | Typical inputs to try: 31 | * awesome movie 32 | * Terrible movie 33 | * that movie really sucks 34 | * I like that movie 35 | * hate the movie 36 | 37 | Play around and enjoy!!
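If you would rather not install a browser extension, the files can also be served with the CORS header directly from Python. Below is a minimal sketch (not part of this repo) of a drop-in replacement for `python -m SimpleHTTPServer`, assuming Python 2's standard SimpleHTTPServer and SocketServer modules:
```
# cors_server.py -- minimal sketch (not part of the repo): serves the current
# directory like `python -m SimpleHTTPServer`, but adds the CORS header so the
# browser can fetch model.json, the weight shards and token_index.csv.
import SimpleHTTPServer
import SocketServer

class CORSRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    def end_headers(self):
        self.send_header('Access-Control-Allow-Origin', '*')
        SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)

if __name__ == '__main__':
    httpd = SocketServer.TCPServer(('', 8000), CORSRequestHandler)
    print 'Serving on http://localhost:8000 with CORS enabled'
    httpd.serve_forever()
```
Run it with `python cors_server.py` from the directory that holds model.json and the weight shard files.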
38 | 39 | #### Troubleshooting 40 | Note that you might encounter an error while loading the model JSON in the HTML page. This can be caused by a mismatch in layer names, an ongoing issue with Keras that is still being worked on. For now, you might need to manually remove the "gru_cell" part from all the layer names in the JSON. For example, "gru_1/gru_cell/biases" -> "gru_1/biases" (a scripted version of this fix is sketched below). 41 | -------------------------------------------------------------------------------- /Chapter03/Run_On_Browser.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Type Movie Review in the Box and Hit Submit
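The layer-name fix described in the Troubleshooting note above can be scripted rather than applied by hand. A rough sketch (not part of the repo), assuming the offending names only need the "gru_cell" path segment stripped from model.json:
```
# fix_layer_names.py -- rough sketch (not part of the repo): removes the
# "gru_cell" segment from every weight name in model.json, turning names such
# as "gru_1/gru_cell/biases" into "gru_1/biases". Back up model.json first.
with open('model.json') as f:
    contents = f.read()

with open('model.json', 'w') as f:
    f.write(contents.replace('gru_cell/', ''))
```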
-------------------------------------------------------------------------------- /Chapter03/TensorflowJS/README.md: -------------------------------------------------------------------------------- 1 | # Sentiment Analysis Using TensorFlow.js 2 | This folder contains the code for Chapter 3 of the book. It creates a sentiment analysis model using the movie reviews dataset mentioned. 3 | ### Installations 4 | * This code was tested using native Python 3.6 with Anaconda 5 | * Create a conda virtual environment and install the relevant packages using the requirements.txt file 6 | ``` 7 | pip install -r requirements.txt 8 | ``` 9 | ### Python Code Run Instructions 10 | Navigate to this directory in a terminal and execute the following command in the virtual environment 11 | ``` 12 | python main.py 13 | ``` 14 | This will train and save the model 15 | 16 | ### Run HTML File 17 | Open the file Run_On_Browser.html in Chrome to run the model in the browser. Note that you may need to configure your server to allow [Cross-Origin Resource Sharing (CORS)](https://enable-cors.org/) in order to allow fetching the files in JavaScript. An easy way to get CORS working is to install the [Chrome CORS Extension](https://chrome.google.com/webstore/detail/allow-control-allow-origi/nlfbmbojpeacfghkpbjhddihlkkiljbi?hl=en). 18 | 19 | Once CORS is enabled you need to start a Python HTTP server to serve the model files to the page. This can be achieved by executing the following command in the directory containing model.json and the other model files. 20 | ``` 21 | python -m SimpleHTTPServer 22 | ``` 23 | Once the server is started you can go to the browser and access the HTML file. Note that when you first load the page it will take 1-2 seconds to load the model and the token index file. 24 | Once the system is ready, type in a review and click submit. It should print the entire tensor of scores at the top (apologies for the formatting). 25 | 26 | Typical inputs to try: 27 | * awesome movie 28 | * Terrible movie 29 | * that movie really sucks 30 | * I like that movie 31 | * hate the movie 32 | 33 | Play around and enjoy!! 34 | 35 | #### Troubleshooting 36 | Note that you might encounter an error while loading the model JSON in the HTML page. This can be caused by a mismatch in layer names, an ongoing issue with Keras that is still being worked on. For now, you might need to manually remove the "gru_cell" part from all the layer names in the JSON. For example, "gru_1/gru_cell/biases" -> "gru_1/biases" 37 | -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/Run_On_Browser.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Type Movie Review in the Box and Hit Submit
106 | 107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/group1-shard1of1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/TensorflowJS/group1-shard1of1 -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/group2-shard1of1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/TensorflowJS/group2-shard1of1 -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/group3-shard1of1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/TensorflowJS/group3-shard1of1 -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/group4-shard1of1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/TensorflowJS/group4-shard1of1 -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/group5-shard1of1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/TensorflowJS/group5-shard1of1 -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | """ 4 | This script takes the input data and trains a sentiment analysis model using neural networks 5 | @author: ankit.jain 6 | """ 7 | 8 | from preprocess_functions import * # Importing all preprocessing functions 9 | from model_functions import * # Importing all model functions 10 | import tensorflow as tf 11 | import os 12 | import tensorflowjs as tfjs 13 | 14 | 15 | # PARAMETERS 16 | current_dir = os.path.dirname(os.path.realpath(__file__)) 17 | INPUT_FILE = os.path.join(current_dir, "sentiment.txt") 18 | OUTPUT_DIR = current_dir 19 | 20 | def main(): 21 | #Read and preprocessing the data 22 | print "=== Read the input data ===" 23 | X_text , Y = get_data(INPUT_FILE) 24 | #Get the relevant token dict 25 | print " ===Tokenizing Reviews === " 26 | token_idx,num_tokens = tokenize_text(X_text) 27 | print 'Num of unique tokens are',num_tokens 28 | max_tokens = get_max(X_text) 29 | print "Max number of tokens in a review are", max_tokens 30 | print "=== Creating Input Sequences ===" 31 | input_sequences = create_sequences(X_text, token_idx,max_tokens) 32 | print "=== Defining the model ===" 33 | model = define_model(num_tokens,max_tokens) 34 | print "=== Training the model===" 35 | model = train_model(model,input_sequences,Y) 36 | print "=== Testing the model with some inputs and the output is ===" 37 | test_model(model,token_idx,max_tokens) 38 | print "=== Saving Model ===" 39 | 
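# save_keras_model writes model.json plus the binary weight shard files
# (group1-shard1of1 ... group5-shard1of1 in this folder) into OUTPUT_DIR;
# Run_On_Browser.html later fetches these files over HTTP.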
tfjs.converters.save_keras_model(model, OUTPUT_DIR) 40 | #model_save(model, OUTPUT_DIR) 41 | print "=== Saving the Token Index Dict for Tensorflow Js" 42 | create_csv(token_idx, 'token_index.csv',OUTPUT_DIR) 43 | 44 | 45 | if __name__ == "__main__": 46 | main() 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/model_functions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | """ 4 | This file defines the model functions to be used for training 5 | 6 | @author: ankit.jain 7 | """ 8 | import os 9 | import pandas as pd 10 | from tensorflow.python.keras.models import Sequential 11 | from tensorflow.python.keras.layers import Dense, GRU, Embedding 12 | from tensorflow.python.keras.optimizers import Adam 13 | from tensorflow.python.keras.models import save_model 14 | from preprocess_functions import create_sequences 15 | 16 | 17 | # Defining parameters 18 | BATCH_SIZE = 32 19 | EPOCHS =15 20 | VAL_SPLIT = 0.05 # Fraction of data to be used for validation 21 | EMBEDDING_SIZE =8 22 | 23 | 24 | def define_model(num_tokens,max_tokens): 25 | ''' 26 | Defines the model definition based on input parameters 27 | ''' 28 | model = Sequential() 29 | model.add(Embedding(input_dim=num_tokens, 30 | output_dim=EMBEDDING_SIZE, 31 | input_length=max_tokens, 32 | name='layer_embedding')) 33 | 34 | model.add(GRU(units=16, name = "gru_1",return_sequences=True)) 35 | model.add(GRU(units=8, name = "gru_2" ,return_sequences=True)) 36 | model.add(GRU(units=4, name= "gru_3")) 37 | model.add(Dense(1, activation='sigmoid',name="dense_1")) 38 | optimizer = Adam(lr=1e-3) 39 | model.compile(loss='binary_crossentropy', 40 | optimizer=optimizer, 41 | metrics=['accuracy']) 42 | print model.summary() 43 | return model 44 | 45 | 46 | def train_model(model,input_sequences,y_train): 47 | ''' 48 | Train the model based on input parameters 49 | ''' 50 | 51 | model.fit(input_sequences, y_train, 52 | validation_split=VAL_SPLIT, epochs=EPOCHS, batch_size=BATCH_SIZE) 53 | return model 54 | 55 | def test_model(model,token_idx,max_tokens): 56 | ''' 57 | Testing the model on sample data 58 | ''' 59 | txt = ["awesome movie","Terrible movie","that movie really sucks","I like that movie","hate the movie"] 60 | pred = model.predict(create_sequences(txt,token_idx,max_tokens)) 61 | pred = [pred[i][0] for i in range(len(txt))] 62 | output_df = pd.DataFrame({"Review Text": txt, "Prediction Score": pred}) 63 | output_df = output_df.loc[:,['Review Text','Prediction Score']] 64 | 65 | print output_df 66 | 67 | def model_save(model, output_dir): 68 | ''' 69 | Saving the model 70 | ''' 71 | output_file = os.path.join(output_dir,"sentiment_analysis_model.h5" ) 72 | save_model(model,output_file,overwrite=True,include_optimizer=True) 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/preprocess_functions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | """ 4 | This script contains the functions to preprocess the text data for neural network model 5 | @author: ankit.jain 6 | """ 7 | import os 8 | import csv 9 | import random 10 | import re 11 | import numpy as np 12 | from tensorflow.python.keras.preprocessing.sequence import pad_sequences 13 | 14 | def get_data(filename): 15 | ''' 16 | 
Reads the data from a text file 17 | ''' 18 | with open(filename, 'r') as f: 19 | target = [] 20 | text_data = [] 21 | lines = f.readlines() 22 | random.shuffle(lines) 23 | for line in lines: 24 | data = line.split('\t') 25 | if len(data) == 2: 26 | target.append(int(data[0])) 27 | text_data.append(data[1].rstrip()) 28 | return text_data,target 29 | 30 | 31 | def get_processed_tokens(text): 32 | ''' 33 | Gets Token List from a Review 34 | ''' 35 | filtered_text = re.sub(r'[^a-zA-Z0-9\s]', '', text) #Removing Punctuations 36 | filtered_text = filtered_text.split() 37 | filtered_text = [token.lower() for token in filtered_text] 38 | return filtered_text 39 | 40 | 41 | def tokenize_text(data_text, min_frequency =5): 42 | ''' 43 | Tokenizes the reviews in the dataset. Filters non frequent tokens 44 | ''' 45 | review_tokens = [get_processed_tokens(review) for review in data_text] # Tokenize the sentences 46 | token_list = [token for review in review_tokens for token in review] #Convert to single list 47 | token_freq_dict = {token:token_list.count(token) for token in set(token_list)} # Get the frequency count of tokens 48 | most_freq_tokens = [tokens for tokens in token_freq_dict if token_freq_dict[tokens] >= min_frequency] 49 | idx = range(len(most_freq_tokens)) 50 | token_idx = dict(zip(most_freq_tokens, idx)) 51 | return token_idx,len(most_freq_tokens) 52 | 53 | def get_max(data): 54 | ''' 55 | Get max length of the token 56 | ''' 57 | tokens_per_review = [len(txt.split()) for txt in data] 58 | return max(tokens_per_review) 59 | 60 | def create_sequences(data_text,token_idx,max_tokens): 61 | ''' 62 | Create sequences appropriate for GRU input 63 | Input: reviews data, token dict, max_tokens 64 | Output: padded_sequences of shape (len(data_text), max_tokens) 65 | ''' 66 | review_tokens = [get_processed_tokens(review) for review in data_text] # Tokenize the sentences 67 | #Covert the tokens to their indexes 68 | review_token_idx = map( lambda review: [token_idx[k] for k in review if k in token_idx.keys() ], review_tokens) 69 | padded_sequences = pad_sequences(review_token_idx, maxlen=max_tokens) 70 | return np.array(padded_sequences) 71 | 72 | def create_csv(token_idx,filename,output_dir): 73 | filename= os.path.join(output_dir,filename) 74 | with open(filename, 'w') as csvfile: 75 | writer = csv.writer(csvfile) 76 | for key in token_idx.keys(): 77 | writer.writerow([key,token_idx[key]]) 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.2.2 2 | astor==0.6.2 3 | backports.weakref==1.0.post1 4 | bleach==1.5.0 5 | enum34==1.1.6 6 | funcsigs==1.0.2 7 | futures==3.2.0 8 | gast==0.2.0 9 | grpcio==1.12.1 10 | h5py==2.7.1 11 | html5lib==0.9999999 12 | Keras==2.1.4 13 | Markdown==2.6.11 14 | mock==2.0.0 15 | numpy==1.14.1 16 | pandas==0.23.1 17 | pbr==4.0.4 18 | protobuf==3.6.0 19 | python-dateutil==2.7.3 20 | pytz==2018.4 21 | PyYAML==3.12 22 | scipy==1.1.0 23 | six==1.11.0 24 | tensorboard==1.8.0 25 | tensorflow==1.8.0 26 | tensorflow-hub==0.1.0 27 | tensorflowjs==0.4.1 28 | termcolor==1.1.0 29 | Theano==1.0.2 30 | Werkzeug==0.14.1 31 | -------------------------------------------------------------------------------- /Chapter03/TensorflowJS/token_index.csv: -------------------------------------------------------------------------------- 1 | all,262 2 | code,363 3 | freakin,364 4 | go,365 5 | hate,0 6 | kate,366 7 | 
retarted,408 8 | to,263 9 | trousers,1 10 | lord,2 11 | worth,137 12 | song,360 13 | very,264 14 | panting,367 15 | betterwe,3 16 | every,4 17 | awful,267 18 | cool,138 19 | school,5 20 | did,139 21 | try,298 22 | harry,268 23 | guy,140 24 | enjoy,6 25 | says,141 26 | grabs,269 27 | past,270 28 | second,7 29 | even,8 30 | what,272 31 | new,9 32 | ever,10 33 | told,11 34 | never,12 35 | here,13 36 | wait,142 37 | great,369 38 | kids,86 39 | didnt,380 40 | dudeee,14 41 | action,100 42 | opinion,370 43 | thats,371 44 | love,143 45 | suicides,144 46 | from,354 47 | would,15 48 | only,201 49 | few,373 50 | angels,146 51 | tell,18 52 | more,275 53 | knows,276 54 | acceptable,147 55 | excellent,19 56 | iii,376 57 | glad,277 58 | must,20 59 | me,21 60 | balls,377 61 | this,378 62 | sentry,279 63 | sucks,280 64 | movies,22 65 | can,149 66 | making,150 67 | my,23 68 | beautiful,281 69 | crazy,151 70 | give,24 71 | awesome,152 72 | in,93 73 | days,52 74 | heard,153 75 | something,381 76 | want,25 77 | station,309 78 | needs,283 79 | end,26 80 | noises,323 81 | get,204 82 | how,27 83 | amazing,28 84 | aching,29 85 | okay,284 86 | may,155 87 | after,30 88 | wrong,31 89 | lot,398 90 | such,156 91 | suck,157 92 | man,158 93 | a,285 94 | sit,218 95 | so,159 96 | fan,266 97 | talk,160 98 | over,33 99 | zen,34 100 | mission,333 101 | course,161 102 | through,208 103 | looks,414 104 | cruise,383 105 | still,162 106 | its,288 107 | before,35 108 | 25,289 109 | felicia,163 110 | interesting,164 111 | actually,290 112 | better,36 113 | differently,37 114 | main,165 115 | might,291 116 | then,222 117 | them,38 118 | good,292 119 | crash,384 120 | sucking,293 121 | material,386 122 | they,39 123 | not,167 124 | now,168 125 | day,387 126 | sucked,294 127 | desperately,295 128 | profound,388 129 | always,237 130 | rocks,40 131 | doesnt,440 132 | went,41 133 | side,42 134 | mean,43 135 | everyone,296 136 | doing,389 137 | series,44 138 | yeah,169 139 | books,390 140 | xmen,46 141 | combining,47 142 | our,391 143 | really,297 144 | god,261 145 | since,299 146 | daniel,48 147 | hill,300 148 | got,49 149 | ass,301 150 | bye,422 151 | friday,302 152 | little,50 153 | quite,171 154 | wanted,51 155 | havent,368 156 | care,172 157 | could,393 158 | terrible,303 159 | times,394 160 | thing,174 161 | place,175 162 | brokeback,176 163 | think,177 164 | lovethe,196 165 | first,178 166 | likeyeah,53 167 | already,287 168 | dont,304 169 | feel,305 170 | potter,179 171 | hoot,54 172 | one,180 173 | lol,251 174 | impossible,182 175 | miss,306 176 | story,307 177 | silent,183 178 | start,396 179 | least,308 180 | anyone,184 181 | their,397 182 | 2,185 183 | too,55 184 | tom,56 185 | stars,252 186 | that,186 187 | depressing,399 188 | off,429 189 | than,187 190 | loved,400 191 | kind,310 192 | b,311 193 | boring,401 194 | were,188 195 | and,189 196 | groaning,57 197 | turned,190 198 | slap,191 199 | sad,192 200 | dash,193 201 | talking,59 202 | say,194 203 | have,402 204 | throat,239 205 | seen,60 206 | saw,195 207 | mountain,312 208 | escapades,313 209 | also,314 210 | take,197 211 | which,404 212 | laughed,61 213 | begin,198 214 | sure,199 215 | though,63 216 | tye,64 217 | who,405 218 | most,315 219 | why,119 220 | absolutely,350 221 | kelsie,406 222 | dragged,62 223 | hips,317 224 | joining,318 225 | saying,65 226 | show,200 227 | anyway,409 228 | enjoyed,66 229 | acne,67 230 | should,410 231 | theme,68 232 | going,202 233 | black,203 234 | lubb,374 235 | pretty,319 236 | vigor,411 237 | do,69 238 | his,320 239 | watching,154 
240 | watch,382 241 | da,70 242 | him,321 243 | freaking,205 244 | catcher,322 245 | officially,71 246 | jokes,206 247 | bad,72 248 | stupid,207 249 | she,413 250 | where,173 251 | see,324 252 | are,325 253 | best,73 254 | said,74 255 | luv,278 256 | movie,326 257 | review,209 258 | rings,75 259 | demons,327 260 | 3,210 261 | won,328 262 | horrible,415 263 | probably,412 264 | reading,211 265 | we,76 266 | awards,212 267 | news,77 268 | kirsten,213 269 | both,329 270 | last,330 271 | ill,78 272 | coz,79 273 | s,214 274 | liked,331 275 | had,107 276 | blonds,81 277 | community,332 278 | homosexuality,215 279 | likes,432 280 | table,416 281 | three,82 282 | been,83 283 | whos,334 284 | much,84 285 | life,85 286 | gay,335 287 | else,336 288 | felicias,216 289 | personally,87 290 | fandom,58 291 | those,217 292 | novel,265 293 | bobbypin,88 294 | these,219 295 | plain,423 296 | will,271 297 | while,337 298 | fun,338 299 | cock,220 300 | theres,339 301 | ive,221 302 | almost,424 303 | is,89 304 | insanely,425 305 | it,90 306 | helped,426 307 | cant,91 308 | im,92 309 | whimpering,223 310 | snuck,282 311 | id,94 312 | if,95 313 | things,96 314 | make,97 315 | reminded,286 316 | same,224 317 | shit,128 318 | used,45 319 | keys,340 320 | evil,99 321 | cleaning,341 322 | totally,225 323 | theater,226 324 | i,430 325 | youre,101 326 | well,431 327 | thought,273 328 | person,342 329 | the,102 330 | dads,103 331 | left,104 332 | just,105 333 | being,227 334 | money,228 335 | yet,106 336 | letting,343 337 | thinking,229 338 | hated,344 339 | character,80 340 | other,446 341 | has,108 342 | hat,109 343 | hates,345 344 | real,230 345 | around,231 346 | read,232 347 | big,98 348 | gonna,234 349 | bitch,395 350 | know,434 351 | mom,235 352 | world,236 353 | bit,346 354 | like,435 355 | lost,32 356 | malfoy,385 357 | either,352 358 | night,110 359 | hung,111 360 | bonkers,112 361 | right,113 362 | people,118 363 | fucking,347 364 | some,348 365 | back,349 366 | oh,242 367 | cowboys,238 368 | for,114 369 | leah,145 370 | does,437 371 | bogus,351 372 | be,353 373 | inaccurate,438 374 | crap,372 375 | by,136 376 | on,240 377 | about,375 378 | ok,241 379 | anything,355 380 | cowboy,148 381 | of,243 382 | outshines,439 383 | stand,244 384 | wotshisface,356 385 | or,245 386 | rockhard,115 387 | seeing,357 388 | into,358 389 | soo,166 390 | two,274 391 | down,116 392 | because,436 393 | gary,246 394 | quiz,417 395 | your,247 396 | despised,117 397 | hes,248 398 | apparently,403 399 | there,249 400 | hey,250 401 | long,181 402 | virgin,120 403 | way,121 404 | jane,122 405 | gin,316 406 | was,123 407 | head,124 408 | becoming,125 409 | but,441 410 | hear,126 411 | with,253 412 | he,442 413 | count,443 414 | made,444 415 | type,17 416 | mtv,127 417 | up,359 418 | us,129 419 | until,16 420 | stories,445 421 | year,170 422 | am,254 423 | deep,255 424 | an,421 425 | as,256 426 | at,257 427 | watched,427 428 | draco,233 429 | film,258 430 | again,428 431 | vinci,418 432 | no,130 433 | when,379 434 | reality,131 435 | book,433 436 | sick,447 437 | you,259 438 | out,392 439 | picture,132 440 | dies,133 441 | kinda,448 442 | crappy,419 443 | friends,449 444 | goin,450 445 | finished,420 446 | eyre,451 447 | hella,361 448 | fact,407 449 | time,134 450 | far,362 451 | serious,135 452 | having,260 453 | -------------------------------------------------------------------------------- /Chapter03/group1-shard1of1: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/group1-shard1of1 -------------------------------------------------------------------------------- /Chapter03/group2-shard1of1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/group2-shard1of1 -------------------------------------------------------------------------------- /Chapter03/group3-shard1of1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/group3-shard1of1 -------------------------------------------------------------------------------- /Chapter03/group4-shard1of1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/group4-shard1of1 -------------------------------------------------------------------------------- /Chapter03/group5-shard1of1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter03/group5-shard1of1 -------------------------------------------------------------------------------- /Chapter03/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | """ 4 | This script takes the input data and trains a sentiment analysis model using neural networks 5 | @author: ankit.jain 6 | """ 7 | 8 | from preprocess_functions import * # Importing all preprocessing functions 9 | from model_functions import * # Importing all model functions 10 | import tensorflow as tf 11 | import os 12 | import tensorflowjs as tfjs 13 | 14 | 15 | # PARAMETERS 16 | current_dir = os.path.dirname(os.path.realpath(__file__)) 17 | INPUT_FILE = os.path.join(current_dir, "sentiment.txt") 18 | OUTPUT_DIR = current_dir 19 | 20 | def main(): 21 | #Read and preprocessing the data 22 | print "=== Read the input data ===" 23 | X_text , Y = get_data(INPUT_FILE) 24 | #Get the relevant token dict 25 | print " ===Tokenizing Reviews === " 26 | token_idx,num_tokens = tokenize_text(X_text) 27 | print 'Num of unique tokens are',num_tokens 28 | max_tokens = get_max(X_text) 29 | print "Max number of tokens in a review are", max_tokens 30 | print "=== Creating Input Sequences ===" 31 | input_sequences = create_sequences(X_text, token_idx,max_tokens) 32 | print "=== Defining the model ===" 33 | model = define_model(num_tokens,max_tokens) 34 | print "=== Training the model===" 35 | model = train_model(model,input_sequences,Y) 36 | print "=== Testing the model with some inputs and the output is ===" 37 | test_model(model,token_idx,max_tokens) 38 | print "=== Saving Model ===" 39 | tfjs.converters.save_keras_model(model, OUTPUT_DIR) 40 | #model_save(model, OUTPUT_DIR) 41 | print "=== Saving the Token Index Dict for Tensorflow Js" 42 | create_csv(token_idx, 'token_index.csv',OUTPUT_DIR) 43 | 44 | 45 | if __name__ == "__main__": 46 | main() 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 
-------------------------------------------------------------------------------- /Chapter03/model_functions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | """ 4 | This file defines the model functions to be used for training 5 | 6 | @author: ankit.jain 7 | """ 8 | import os 9 | import pandas as pd 10 | from tensorflow.python.keras.models import Sequential 11 | from tensorflow.python.keras.layers import Dense, GRU, Embedding 12 | from tensorflow.python.keras.optimizers import Adam 13 | from tensorflow.python.keras.models import save_model 14 | from preprocess_functions import create_sequences 15 | 16 | 17 | # Defining parameters 18 | BATCH_SIZE = 32 19 | EPOCHS =15 20 | VAL_SPLIT = 0.05 # Fraction of data to be used for validation 21 | EMBEDDING_SIZE =8 22 | 23 | 24 | def define_model(num_tokens,max_tokens): 25 | ''' 26 | Defines the model definition based on input parameters 27 | ''' 28 | model = Sequential() 29 | model.add(Embedding(input_dim=num_tokens, 30 | output_dim=EMBEDDING_SIZE, 31 | input_length=max_tokens, 32 | name='layer_embedding')) 33 | 34 | model.add(GRU(units=16, name = "gru_1",return_sequences=True)) 35 | model.add(GRU(units=8, name = "gru_2" ,return_sequences=True)) 36 | model.add(GRU(units=4, name= "gru_3")) 37 | model.add(Dense(1, activation='sigmoid',name="dense_1")) 38 | optimizer = Adam(lr=1e-3) 39 | model.compile(loss='binary_crossentropy', 40 | optimizer=optimizer, 41 | metrics=['accuracy']) 42 | print model.summary() 43 | return model 44 | 45 | 46 | def train_model(model,input_sequences,y_train): 47 | ''' 48 | Train the model based on input parameters 49 | ''' 50 | 51 | model.fit(input_sequences, y_train, 52 | validation_split=VAL_SPLIT, epochs=EPOCHS, batch_size=BATCH_SIZE) 53 | return model 54 | 55 | def test_model(model,token_idx,max_tokens): 56 | ''' 57 | Testing the model on sample data 58 | ''' 59 | txt = ["awesome movie","Terrible movie","that movie really sucks","I like that movie","hate the movie"] 60 | pred = model.predict(create_sequences(txt,token_idx,max_tokens)) 61 | pred = [pred[i][0] for i in range(len(txt))] 62 | output_df = pd.DataFrame({"Review Text": txt, "Prediction Score": pred}) 63 | output_df = output_df.loc[:,['Review Text','Prediction Score']] 64 | 65 | print output_df 66 | 67 | def model_save(model, output_dir): 68 | ''' 69 | Saving the model 70 | ''' 71 | output_file = os.path.join(output_dir,"sentiment_analysis_model.h5" ) 72 | save_model(model,output_file,overwrite=True,include_optimizer=True) 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /Chapter03/preprocess_functions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | """ 4 | This script contains the functions to preprocess the text data for neural network model 5 | @author: ankit.jain 6 | """ 7 | import os 8 | import csv 9 | import random 10 | import re 11 | import numpy as np 12 | from tensorflow.python.keras.preprocessing.sequence import pad_sequences 13 | 14 | def get_data(filename): 15 | ''' 16 | Reads the data from a text file 17 | ''' 18 | with open(filename, 'r') as f: 19 | target = [] 20 | text_data = [] 21 | lines = f.readlines() 22 | random.shuffle(lines) 23 | for line in lines: 24 | data = line.split('\t') 25 | if len(data) == 2: 26 | target.append(int(data[0])) 27 | text_data.append(data[1].rstrip()) 28 | return text_data,target 
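# sentiment.txt format (as parsed by get_data above): one review per line, a
# tab-separated integer label followed by the review text, e.g. the hypothetical
# line "1\tloved this movie" (1 presumably marking a positive review); lines
# that do not split into exactly two fields are skipped.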
29 | 30 | 31 | def get_processed_tokens(text): 32 | ''' 33 | Gets Token List from a Review 34 | ''' 35 | filtered_text = re.sub(r'[^a-zA-Z0-9\s]', '', text) #Removing Punctuations 36 | filtered_text = filtered_text.split() 37 | filtered_text = [token.lower() for token in filtered_text] 38 | return filtered_text 39 | 40 | 41 | def tokenize_text(data_text, min_frequency =5): 42 | ''' 43 | Tokenizes the reviews in the dataset. Filters non frequent tokens 44 | ''' 45 | review_tokens = [get_processed_tokens(review) for review in data_text] # Tokenize the sentences 46 | token_list = [token for review in review_tokens for token in review] #Convert to single list 47 | token_freq_dict = {token:token_list.count(token) for token in set(token_list)} # Get the frequency count of tokens 48 | most_freq_tokens = [tokens for tokens in token_freq_dict if token_freq_dict[tokens] >= min_frequency] 49 | idx = range(len(most_freq_tokens)) 50 | token_idx = dict(zip(most_freq_tokens, idx)) 51 | return token_idx,len(most_freq_tokens) 52 | 53 | def get_max(data): 54 | ''' 55 | Get max length of the token 56 | ''' 57 | tokens_per_review = [len(txt.split()) for txt in data] 58 | return max(tokens_per_review) 59 | 60 | def create_sequences(data_text,token_idx,max_tokens): 61 | ''' 62 | Create sequences appropriate for GRU input 63 | Input: reviews data, token dict, max_tokens 64 | Output: padded_sequences of shape (len(data_text), max_tokens) 65 | ''' 66 | review_tokens = [get_processed_tokens(review) for review in data_text] # Tokenize the sentences 67 | #Covert the tokens to their indexes 68 | review_token_idx = map( lambda review: [token_idx[k] for k in review if k in token_idx.keys() ], review_tokens) 69 | padded_sequences = pad_sequences(review_token_idx, maxlen=max_tokens) 70 | return np.array(padded_sequences) 71 | 72 | def create_csv(token_idx,filename,output_dir): 73 | filename= os.path.join(output_dir,filename) 74 | with open(filename, 'w') as csvfile: 75 | writer = csv.writer(csvfile) 76 | for key in token_idx.keys(): 77 | writer.writerow([key,token_idx[key]]) 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /Chapter03/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.2.2 2 | astor==0.6.2 3 | backports.weakref==1.0.post1 4 | bleach==1.5.0 5 | enum34==1.1.6 6 | funcsigs==1.0.2 7 | futures==3.2.0 8 | gast==0.2.0 9 | grpcio==1.12.1 10 | h5py==2.7.1 11 | html5lib==0.9999999 12 | Keras==2.1.4 13 | Markdown==2.6.11 14 | mock==2.0.0 15 | numpy==1.14.1 16 | pandas==0.23.1 17 | pbr==4.0.4 18 | protobuf==3.6.0 19 | python-dateutil==2.7.3 20 | pytz==2018.4 21 | PyYAML==3.12 22 | scipy==1.1.0 23 | six==1.11.0 24 | tensorboard==1.8.0 25 | tensorflow==1.8.0 26 | tensorflow-hub==0.1.0 27 | tensorflowjs==0.4.1 28 | termcolor==1.1.0 29 | Theano==1.0.2 30 | Werkzeug==0.14.1 31 | -------------------------------------------------------------------------------- /Chapter03/token_index.csv: -------------------------------------------------------------------------------- 1 | all,262 2 | code,363 3 | freakin,364 4 | go,365 5 | hate,0 6 | kate,366 7 | retarted,408 8 | to,263 9 | trousers,1 10 | lord,2 11 | worth,137 12 | song,360 13 | very,264 14 | panting,367 15 | betterwe,3 16 | every,4 17 | awful,267 18 | cool,138 19 | school,5 20 | did,139 21 | try,298 22 | harry,268 23 | guy,140 24 | enjoy,6 25 | says,141 26 | grabs,269 27 | past,270 28 | second,7 29 | even,8 30 | what,272 31 | new,9 32 | ever,10 33 | told,11 34 | 
never,12 35 | here,13 36 | wait,142 37 | great,369 38 | kids,86 39 | didnt,380 40 | dudeee,14 41 | action,100 42 | opinion,370 43 | thats,371 44 | love,143 45 | suicides,144 46 | from,354 47 | would,15 48 | only,201 49 | few,373 50 | angels,146 51 | tell,18 52 | more,275 53 | knows,276 54 | acceptable,147 55 | excellent,19 56 | iii,376 57 | glad,277 58 | must,20 59 | me,21 60 | balls,377 61 | this,378 62 | sentry,279 63 | sucks,280 64 | movies,22 65 | can,149 66 | making,150 67 | my,23 68 | beautiful,281 69 | crazy,151 70 | give,24 71 | awesome,152 72 | in,93 73 | days,52 74 | heard,153 75 | something,381 76 | want,25 77 | station,309 78 | needs,283 79 | end,26 80 | noises,323 81 | get,204 82 | how,27 83 | amazing,28 84 | aching,29 85 | okay,284 86 | may,155 87 | after,30 88 | wrong,31 89 | lot,398 90 | such,156 91 | suck,157 92 | man,158 93 | a,285 94 | sit,218 95 | so,159 96 | fan,266 97 | talk,160 98 | over,33 99 | zen,34 100 | mission,333 101 | course,161 102 | through,208 103 | looks,414 104 | cruise,383 105 | still,162 106 | its,288 107 | before,35 108 | 25,289 109 | felicia,163 110 | interesting,164 111 | actually,290 112 | better,36 113 | differently,37 114 | main,165 115 | might,291 116 | then,222 117 | them,38 118 | good,292 119 | crash,384 120 | sucking,293 121 | material,386 122 | they,39 123 | not,167 124 | now,168 125 | day,387 126 | sucked,294 127 | desperately,295 128 | profound,388 129 | always,237 130 | rocks,40 131 | doesnt,440 132 | went,41 133 | side,42 134 | mean,43 135 | everyone,296 136 | doing,389 137 | series,44 138 | yeah,169 139 | books,390 140 | xmen,46 141 | combining,47 142 | our,391 143 | really,297 144 | god,261 145 | since,299 146 | daniel,48 147 | hill,300 148 | got,49 149 | ass,301 150 | bye,422 151 | friday,302 152 | little,50 153 | quite,171 154 | wanted,51 155 | havent,368 156 | care,172 157 | could,393 158 | terrible,303 159 | times,394 160 | thing,174 161 | place,175 162 | brokeback,176 163 | think,177 164 | lovethe,196 165 | first,178 166 | likeyeah,53 167 | already,287 168 | dont,304 169 | feel,305 170 | potter,179 171 | hoot,54 172 | one,180 173 | lol,251 174 | impossible,182 175 | miss,306 176 | story,307 177 | silent,183 178 | start,396 179 | least,308 180 | anyone,184 181 | their,397 182 | 2,185 183 | too,55 184 | tom,56 185 | stars,252 186 | that,186 187 | depressing,399 188 | off,429 189 | than,187 190 | loved,400 191 | kind,310 192 | b,311 193 | boring,401 194 | were,188 195 | and,189 196 | groaning,57 197 | turned,190 198 | slap,191 199 | sad,192 200 | dash,193 201 | talking,59 202 | say,194 203 | have,402 204 | throat,239 205 | seen,60 206 | saw,195 207 | mountain,312 208 | escapades,313 209 | also,314 210 | take,197 211 | which,404 212 | laughed,61 213 | begin,198 214 | sure,199 215 | though,63 216 | tye,64 217 | who,405 218 | most,315 219 | why,119 220 | absolutely,350 221 | kelsie,406 222 | dragged,62 223 | hips,317 224 | joining,318 225 | saying,65 226 | show,200 227 | anyway,409 228 | enjoyed,66 229 | acne,67 230 | should,410 231 | theme,68 232 | going,202 233 | black,203 234 | lubb,374 235 | pretty,319 236 | vigor,411 237 | do,69 238 | his,320 239 | watching,154 240 | watch,382 241 | da,70 242 | him,321 243 | freaking,205 244 | catcher,322 245 | officially,71 246 | jokes,206 247 | bad,72 248 | stupid,207 249 | she,413 250 | where,173 251 | see,324 252 | are,325 253 | best,73 254 | said,74 255 | luv,278 256 | movie,326 257 | review,209 258 | rings,75 259 | demons,327 260 | 3,210 261 | won,328 262 | horrible,415 263 | probably,412 264 
| reading,211 265 | we,76 266 | awards,212 267 | news,77 268 | kirsten,213 269 | both,329 270 | last,330 271 | ill,78 272 | coz,79 273 | s,214 274 | liked,331 275 | had,107 276 | blonds,81 277 | community,332 278 | homosexuality,215 279 | likes,432 280 | table,416 281 | three,82 282 | been,83 283 | whos,334 284 | much,84 285 | life,85 286 | gay,335 287 | else,336 288 | felicias,216 289 | personally,87 290 | fandom,58 291 | those,217 292 | novel,265 293 | bobbypin,88 294 | these,219 295 | plain,423 296 | will,271 297 | while,337 298 | fun,338 299 | cock,220 300 | theres,339 301 | ive,221 302 | almost,424 303 | is,89 304 | insanely,425 305 | it,90 306 | helped,426 307 | cant,91 308 | im,92 309 | whimpering,223 310 | snuck,282 311 | id,94 312 | if,95 313 | things,96 314 | make,97 315 | reminded,286 316 | same,224 317 | shit,128 318 | used,45 319 | keys,340 320 | evil,99 321 | cleaning,341 322 | totally,225 323 | theater,226 324 | i,430 325 | youre,101 326 | well,431 327 | thought,273 328 | person,342 329 | the,102 330 | dads,103 331 | left,104 332 | just,105 333 | being,227 334 | money,228 335 | yet,106 336 | letting,343 337 | thinking,229 338 | hated,344 339 | character,80 340 | other,446 341 | has,108 342 | hat,109 343 | hates,345 344 | real,230 345 | around,231 346 | read,232 347 | big,98 348 | gonna,234 349 | bitch,395 350 | know,434 351 | mom,235 352 | world,236 353 | bit,346 354 | like,435 355 | lost,32 356 | malfoy,385 357 | either,352 358 | night,110 359 | hung,111 360 | bonkers,112 361 | right,113 362 | people,118 363 | fucking,347 364 | some,348 365 | back,349 366 | oh,242 367 | cowboys,238 368 | for,114 369 | leah,145 370 | does,437 371 | bogus,351 372 | be,353 373 | inaccurate,438 374 | crap,372 375 | by,136 376 | on,240 377 | about,375 378 | ok,241 379 | anything,355 380 | cowboy,148 381 | of,243 382 | outshines,439 383 | stand,244 384 | wotshisface,356 385 | or,245 386 | rockhard,115 387 | seeing,357 388 | into,358 389 | soo,166 390 | two,274 391 | down,116 392 | because,436 393 | gary,246 394 | quiz,417 395 | your,247 396 | despised,117 397 | hes,248 398 | apparently,403 399 | there,249 400 | hey,250 401 | long,181 402 | virgin,120 403 | way,121 404 | jane,122 405 | gin,316 406 | was,123 407 | head,124 408 | becoming,125 409 | but,441 410 | hear,126 411 | with,253 412 | he,442 413 | count,443 414 | made,444 415 | type,17 416 | mtv,127 417 | up,359 418 | us,129 419 | until,16 420 | stories,445 421 | year,170 422 | am,254 423 | deep,255 424 | an,421 425 | as,256 426 | at,257 427 | watched,427 428 | draco,233 429 | film,258 430 | again,428 431 | vinci,418 432 | no,130 433 | when,379 434 | reality,131 435 | book,433 436 | sick,447 437 | you,259 438 | out,392 439 | picture,132 440 | dies,133 441 | kinda,448 442 | crappy,419 443 | friends,449 444 | goin,450 445 | finished,420 446 | eyre,451 447 | hella,361 448 | fact,407 449 | time,134 450 | far,362 451 | serious,135 452 | having,260 453 | -------------------------------------------------------------------------------- /Chapter04/README.md: -------------------------------------------------------------------------------- 1 | # Digit Classification using Tensorflow Lite 2 | This folder contains the code to build a deep learning model on MNIST handwritten digit dataset and converting the trained model to TF Lite format. 
3 | 4 | ### Dataset 5 | * https://www.tensorflow.org/guide/datasets 6 | 7 | ### Installations 8 | * This code was tested using native Python 3 with Anaconda 9 | * Create a conda virtual environment and install the relevant packages using the requirements.txt file 10 | ``` 11 | pip install -r requirements.txt 12 | ``` 13 | ### Python Code Run Instructions 14 | To run the code, just execute 15 | ``` 16 | python main.py 17 | ``` 18 | On a CPU the code might take a few minutes to run; on a GPU it should be much faster. 19 | #### Dataset 20 | The dataset used is the standard MNIST handwritten digits dataset available in TensorFlow datasets. 21 | 22 | ### Code Details 23 | The code is fairly self-explanatory. There are mainly three files in the implementation: 24 | * main.py -- Implements the main function and also implements the model building and training routines. 25 | * parameters.py -- Defines the parameters used in the code. 26 | * utils.py -- Contains the helper functions for the code. 27 | 28 | This code implements graph freezing and optimization, but you will have to use the TensorFlow Lite Optimizing Converter (toco) to convert 29 | the optimized graph to the .tflite format. 30 | 31 | -------------------------------------------------------------------------------- /Chapter04/Tensorflow_Lite/README.md: -------------------------------------------------------------------------------- 1 | # Digit Classification Using TensorFlow Lite 2 | This folder contains the code to build a deep learning model on the MNIST handwritten digits dataset and convert the trained model to the TF Lite format. 3 | ### Installations 4 | * This code was tested using native Python 3 with Anaconda 5 | * Create a conda virtual environment and install the relevant packages using the requirements.txt file 6 | ``` 7 | pip install -r requirements.txt 8 | ``` 9 | ### Python Code Run Instructions 10 | To run the code, just execute 11 | ``` 12 | python main.py 13 | ``` 14 | On a CPU the code might take a few minutes to run; on a GPU it should be much faster. 15 | #### Dataset 16 | The dataset used is the standard MNIST handwritten digits dataset available in TensorFlow datasets. 17 | 18 | ### Code Details 19 | The code is fairly self-explanatory. There are mainly three files in the implementation: 20 | * main.py -- Implements the main function and also implements the model building and training routines. 21 | * parameters.py -- Defines the parameters used in the code. 22 | * utils.py -- Contains the helper functions for the code. 23 | 24 | This code implements graph freezing and optimization, but you will have to use the TensorFlow Lite Optimizing Converter (toco) to convert 25 | the optimized graph to the .tflite format (a conversion sketch follows below).
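The toco step mentioned above can also be driven from Python. Below is a minimal conversion sketch (not part of the repo), assuming the tf.contrib.lite API in the pinned tensorflow==1.11.0; the graph path and the input array name are placeholders, so inspect the optimized graph in TensorBoard and substitute the real values:
```
# convert_to_tflite.py -- minimal sketch (not part of the repo), assuming
# TensorFlow 1.11's tf.contrib.lite API. The graph path and the input array
# name below are placeholders -- check the optimized graph in TensorBoard.
import tensorflow as tf

converter = tf.contrib.lite.TocoConverter.from_frozen_graph(
    'logs/model_files_<timestamp>/optimized/MNIST_optimized.pb',  # OPTIMIZE_FILE_NAME
    input_arrays=['conv2d_1_input'],               # hypothetical input tensor name
    output_arrays=['softmax_tensor_1/Softmax'],    # output node used when freezing
    input_shapes={'conv2d_1_input': [1, 28, 28, 1]})
tflite_model = converter.convert()
with open('MNIST_model.tflite', 'wb') as f:
    f.write(tflite_model)
```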
26 | 27 | -------------------------------------------------------------------------------- /Chapter04/Tensorflow_Lite/main.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import keras 3 | from keras.datasets import mnist 4 | from keras.models import Sequential 5 | from keras.layers import Dense, Dropout, Flatten 6 | from keras.layers import Conv2D, MaxPooling2D,Activation 7 | from keras.callbacks import TensorBoard 8 | from keras import backend as K 9 | 10 | from utils import * 11 | 12 | 13 | # Creating the relevant directories 14 | MODEL_DIR = create_model_dir() 15 | FREEZE_GRAPH_DIR = create_freeze_graph_dir(MODEL_DIR) 16 | OPTIMIZED_GRAPH_DIR =create_optimized_graph_dir(MODEL_DIR) 17 | 18 | 19 | 20 | def prepare_training_data(): 21 | # the data, split between train and test sets 22 | (x_train, y_train), (x_test, y_test) = mnist.load_data() 23 | 24 | x_train = x_train.reshape(x_train.shape[0], IMAGE_SIZE, IMAGE_SIZE, 1) 25 | x_test = x_test.reshape(x_test.shape[0], IMAGE_SIZE, IMAGE_SIZE, 1) 26 | x_train = x_train.astype('float32') 27 | x_test = x_test.astype('float32') 28 | x_train /= 255 29 | x_test /= 255 30 | print('x_train shape:', x_train.shape) 31 | print(x_train.shape[0], 'Num Training images') 32 | print(x_test.shape[0], 'Num Testing images') 33 | 34 | # Converting the target variable to categorical 35 | y_train = keras.utils.to_categorical(y_train, NUM_CLASSES) 36 | y_test = keras.utils.to_categorical(y_test, NUM_CLASSES) 37 | 38 | return x_train,x_test,y_train,y_test 39 | 40 | class Model(): 41 | def __init__(self): 42 | self.x_train, self.x_test,self.y_train, self.y_test = prepare_training_data() 43 | self.model = self.define_model() 44 | 45 | def define_model(self): 46 | model = Sequential() 47 | model.add(Conv2D(32, kernel_size=(3, 3), 48 | activation='relu', 49 | input_shape=INPUT_SHAPE)) 50 | model.add(Conv2D(64, (3, 3), activation='relu')) 51 | model.add(MaxPooling2D(pool_size=(2, 2))) 52 | model.add(Dropout(0.25)) 53 | model.add(Flatten()) 54 | model.add(Dense(128, activation='relu')) 55 | model.add(Dropout(0.5)) 56 | 57 | model.add(Dense(NUM_CLASSES)) 58 | model.add(Activation('softmax', name = 'softmax_tensor')) 59 | 60 | model.compile(loss=keras.losses.categorical_crossentropy, 61 | optimizer=keras.optimizers.Adadelta(), 62 | metrics=['accuracy']) 63 | tensorboard = TensorBoard(log_dir=MODEL_DIR) 64 | self.model = model 65 | self.tensorboard = tensorboard 66 | 67 | def train_model(self): 68 | self.model.fit(self.x_train, self.y_train, 69 | batch_size=BATCH_SIZE, 70 | epochs=EPOCHS, 71 | verbose=1, 72 | validation_data=(self.x_test, self.y_test), 73 | callbacks = [self.tensorboard]) 74 | score = self.model.evaluate(self.x_test, self.y_test, verbose=0) 75 | print('Test loss:', score[0]) 76 | print('Test accuracy:', score[1]) 77 | 78 | def main(): 79 | 80 | # Load and prepare training data 81 | model = Model() 82 | print ("Defining Model") 83 | model.define_model() 84 | print ("Training Model") 85 | model.train_model() 86 | print ("Creating Frozen Graph") 87 | sess = K.get_session() 88 | create_frozen_graph(sess, ['softmax_tensor_1/Softmax'],FREEZE_GRAPH_DIR) 89 | 90 | print ("Converting Frozen Graph To Tensorboard compatible file") 91 | pb_to_tensorboard(FREEZE_GRAPH_DIR, "freeze") 92 | 93 | print ("Optimizing the graph for inference") 94 | optimize_graph(FREEZE_GRAPH_DIR,OPTIMIZED_GRAPH_DIR) 95 | pb_to_tensorboard(OPTIMIZED_GRAPH_DIR,"optimize") 96 | 97 | 98 | if __name__ == "__main__": 
99 | main() 100 | 101 | -------------------------------------------------------------------------------- /Chapter04/Tensorflow_Lite/parameters.py: -------------------------------------------------------------------------------- 1 | LOGGING_DIR = "./logs" 2 | FREEZE_FILE_NAME = 'MNIST_model.pb' 3 | OPTIMIZE_FILE_NAME = 'MNIST_optimized.pb' 4 | 5 | BATCH_SIZE = 128 6 | EPOCHS = 12 7 | 8 | # input image dimensions 9 | IMAGE_SIZE = 28 10 | NUM_CLASSES = 10 11 | INPUT_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 1) 12 | -------------------------------------------------------------------------------- /Chapter04/Tensorflow_Lite/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.3.0 2 | astor==0.7.1 3 | atomicwrites==1.2.1 4 | attrs==18.2.0 5 | backports-abc==0.5 6 | backports.functools-lru-cache==1.5 7 | backports.weakref==1.0.post1 8 | bleach==1.5.0 9 | certifi==2018.10.15 10 | cloudpickle==0.6.1 11 | configparser==3.5.0 12 | cycler==0.10.0 13 | dask==0.19.4 14 | decorator==4.3.0 15 | enum34==1.1.6 16 | funcsigs==1.0.2 17 | functools32==3.2.3.post2 18 | futures==3.2.0 19 | gast==0.2.0 20 | graphviz==0.8 21 | grpcio==1.12.1 22 | h5py==2.8.0 23 | html5lib==0.9999999 24 | imageio==2.4.1 25 | Keras==2.2.4 26 | Keras-Applications==1.0.6 27 | Keras-Preprocessing==1.0.5 28 | kiwisolver==1.0.1 29 | linecache2==1.0.0 30 | Markdown==2.6.11 31 | matplotlib==2.2.2 32 | mkl-fft==1.0.4 33 | mkl-random==1.0.1 34 | mock==2.0.0 35 | more-itertools==4.3.0 36 | multipledispatch==0.6.0 37 | mxnet==0.11.0rc1 38 | networkx==2.2 39 | numpy==1.14.5 40 | olefile==0.45.1 41 | pandas==0.23.2 42 | pathlib2==2.3.2 43 | patsy==0.5.0 44 | pbr==4.2.0 45 | Pillow==5.3.0 46 | pluggy==0.7.1 47 | protobuf==3.6.1 48 | py==1.6.0 49 | pyparsing==2.2.0 50 | pytest==3.8.1 51 | python-dateutil==2.7.3 52 | pytz==2018.5 53 | PyWavelets==1.0.1 54 | PyYAML==3.13 55 | scandir==1.9.0 56 | scikit-image==0.14.0 57 | scikit-learn==0.20.0 58 | scipy==1.1.0 59 | seaborn==0.9.0 60 | singledispatch==3.4.0.3 61 | six==1.11.0 62 | statsmodels==0.9.0 63 | subprocess32==3.5.2 64 | tensorboard==1.11.0 65 | tensorflow==1.11.0 66 | tensorflow-probability==0.4.0 67 | termcolor==1.1.0 68 | toolz==0.9.0 69 | tornado==5.1 70 | tqdm==4.23.4 71 | traceback2==1.4.0 72 | unittest2==1.1.0 73 | Werkzeug==0.14.1 74 | wget==3.2 75 | -------------------------------------------------------------------------------- /Chapter04/Tensorflow_Lite/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from time import time 3 | import tensorflow as tf 4 | from tensorflow.python.platform import gfile 5 | from tensorflow.core.framework import graph_pb2 6 | from tensorflow.core.framework import node_def_pb2 7 | from tensorflow.python.framework.graph_util import convert_variables_to_constants 8 | from parameters import * 9 | 10 | 11 | def create_model_dir(): 12 | current_time = time() 13 | model_dir = LOGGING_DIR + "/model_files_{}".format(current_time) 14 | if not os.path.exists(model_dir): 15 | os.makedirs(model_dir) 16 | return model_dir 17 | 18 | def create_freeze_graph_dir(model_dir): 19 | freeze_graph_dir = os.path.join(model_dir, "freeze") 20 | if not os.path.exists(freeze_graph_dir): 21 | os.makedirs(freeze_graph_dir) 22 | return freeze_graph_dir 23 | 24 | def create_optimized_graph_dir(model_dir): 25 | optimized_graph_dir = os.path.join(model_dir, "optimized") 26 | if not os.path.exists(optimized_graph_dir): 27 | os.makedirs(optimized_graph_dir) 28 | return 
optimized_graph_dir 29 | 30 | def create_frozen_graph(sess,output_name,freeze_graph_dir): 31 | frozen_graph = freeze_session(sess, 32 | output_names=output_name) 33 | tf.train.write_graph(frozen_graph, freeze_graph_dir, FREEZE_FILE_NAME , as_text=False) 34 | 35 | def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True): 36 | """ 37 | Freezes the state of a session into a pruned computation graph. 38 | 39 | Converts the existing graph into a new graph where variable nodes are replaced by 40 | constants. New graph trims the existing graph of any operations which are not required 41 | to compute the requested output. 42 | 43 | outputs are removed. 44 | @param session The TensorFlow session to be frozen. 45 | @param keep_var_names A list of variable names that should not be frozen, 46 | or None to freeze all the variables in the graph. 47 | @param output_names Names of the relevant graph outputs. 48 | @param clear_devices Remove the device directives from the graph for better portability. 49 | @return The frozen graph definition. 50 | """ 51 | 52 | graph = session.graph 53 | with graph.as_default(): 54 | freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or [])) 55 | output_names = output_names or [] 56 | output_names += [v.op.name for v in tf.global_variables()] 57 | input_graph_def = graph.as_graph_def() 58 | if clear_devices: 59 | for node in input_graph_def.node: 60 | node.device = "" 61 | frozen_graph = convert_variables_to_constants(session, input_graph_def, 62 | output_names, freeze_var_names) 63 | return frozen_graph 64 | 65 | def pb_to_tensorboard(input_graph_dir,graph_type ="freeze"): 66 | ''' 67 | Converts the graph ".pb" file to Tensorboard readable format 68 | :param input_graph_dir: Directory where the graph file is stored 69 | :param graph_type: "freeze" or "optimize" depending on the operation. 70 | :return: Saves the file in the folder which can be opened through Tensorboard 71 | ''' 72 | file_name = "" 73 | if graph_type == "freeze": 74 | file_name = FREEZE_FILE_NAME 75 | elif graph_type == "optimize": 76 | file_name = OPTIMIZE_FILE_NAME 77 | 78 | with tf.Session() as sess: 79 | model_filename = input_graph_dir + "/" + file_name 80 | with gfile.FastGFile(model_filename, 'rb') as f: 81 | graph_def = tf.GraphDef() 82 | graph_def.ParseFromString(f.read()) 83 | g_in = tf.import_graph_def(graph_def) 84 | train_writer = tf.summary.FileWriter(input_graph_dir) 85 | train_writer.add_graph(sess.graph) 86 | 87 | def strip(input_graph, drop_scope, input_before, output_after, pl_name): 88 | ''' 89 | This function strips the drop_scope node from the graph. 
90 | :param input_graph: Input graph 91 | :param drop_scope: Scope like "Dropout" which needs to be removed from the graph 92 | :param input_before: Input before the drop_scope 93 | :param output_after: Output after the drop_scope 94 | :param pl_name: Name of pl 95 | :return: stripped output graph 96 | ''' 97 | input_nodes = input_graph.node 98 | nodes_after_strip = [] 99 | for node in input_nodes: 100 | if node.name.startswith(drop_scope + '/'): 101 | continue 102 | 103 | if node.name == pl_name: 104 | continue 105 | 106 | new_node = node_def_pb2.NodeDef() 107 | new_node.CopyFrom(node) 108 | if new_node.name == output_after: 109 | new_input = [] 110 | for node_name in new_node.input: 111 | if node_name == drop_scope + '/cond/Merge': 112 | new_input.append(input_before) 113 | else: 114 | new_input.append(node_name) 115 | del new_node.input[:] 116 | new_node.input.extend(new_input) 117 | else: 118 | new_input= [] 119 | for node_name in new_node.input: 120 | if node_name == drop_scope + '/cond/Merge': 121 | new_input.append(input_before) 122 | else: 123 | new_input.append(node_name) 124 | del new_node.input[:] 125 | new_node.input.extend(new_input) 126 | 127 | nodes_after_strip.append(new_node) 128 | 129 | output_graph = graph_pb2.GraphDef() 130 | output_graph.node.extend(nodes_after_strip) 131 | return output_graph 132 | 133 | 134 | def optimize_graph(input_dir, output_dir): 135 | ''' 136 | This is used to optimize the frozen graph by removing any unnecessary ops 137 | :param input_dir: directory where input graph is stored. 138 | :param output_dir: directory where final graph should be stored. 139 | :return: None 140 | ''' 141 | input_graph = os.path.join(input_dir, FREEZE_FILE_NAME) 142 | output_graph = os.path.join(output_dir, OPTIMIZE_FILE_NAME) 143 | 144 | input_graph_def = tf.GraphDef() 145 | with tf.gfile.FastGFile(input_graph, "rb") as f: 146 | input_graph_def.ParseFromString(f.read()) 147 | 148 | output_graph_def = strip(input_graph_def, u'dropout_1', u'conv2d_2/bias', u'dense_1/kernel', u'training') 149 | output_graph_def = strip(output_graph_def, u'dropout_3', u'max_pooling2d_2/MaxPool', u'flatten_2/Shape', 150 | u'training') 151 | output_graph_def = strip(output_graph_def, u'dropout_4', u'dense_3/Relu', u'dense_4/kernel', u'training') 152 | output_graph_def = strip(output_graph_def, u'Adadelta_1', u'softmax_tensor_1/Softmax', 153 | u'training/Adadelta/Variable', u'training') 154 | output_graph_def = strip(output_graph_def, u'training', u'softmax_tensor_1/Softmax', 155 | u'_', u'training') 156 | 157 | with tf.gfile.GFile(output_graph, "wb") as f: 158 | f.write(output_graph_def.SerializeToString()) 159 | -------------------------------------------------------------------------------- /Chapter04/main.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import keras 3 | from keras.datasets import mnist 4 | from keras.models import Sequential 5 | from keras.layers import Dense, Dropout, Flatten 6 | from keras.layers import Conv2D, MaxPooling2D,Activation 7 | from keras.callbacks import TensorBoard 8 | from keras import backend as K 9 | 10 | from utils import * 11 | 12 | 13 | # Creating the relevant directories 14 | MODEL_DIR = create_model_dir() 15 | FREEZE_GRAPH_DIR = create_freeze_graph_dir(MODEL_DIR) 16 | OPTIMIZED_GRAPH_DIR =create_optimized_graph_dir(MODEL_DIR) 17 | 18 | 19 | 20 | def prepare_training_data(): 21 | # the data, split between train and test sets 22 | (x_train, y_train), (x_test, y_test) = 
mnist.load_data() 23 | 24 | x_train = x_train.reshape(x_train.shape[0], IMAGE_SIZE, IMAGE_SIZE, 1) 25 | x_test = x_test.reshape(x_test.shape[0], IMAGE_SIZE, IMAGE_SIZE, 1) 26 | x_train = x_train.astype('float32') 27 | x_test = x_test.astype('float32') 28 | x_train /= 255 29 | x_test /= 255 30 | print('x_train shape:', x_train.shape) 31 | print(x_train.shape[0], 'Num Training images') 32 | print(x_test.shape[0], 'Num Testing images') 33 | 34 | # Converting the target variable to categorical 35 | y_train = keras.utils.to_categorical(y_train, NUM_CLASSES) 36 | y_test = keras.utils.to_categorical(y_test, NUM_CLASSES) 37 | 38 | return x_train,x_test,y_train,y_test 39 | 40 | class Model(): 41 | def __init__(self): 42 | self.x_train, self.x_test,self.y_train, self.y_test = prepare_training_data() 43 | self.model = self.define_model() 44 | 45 | def define_model(self): 46 | model = Sequential() 47 | model.add(Conv2D(32, kernel_size=(3, 3), 48 | activation='relu', 49 | input_shape=INPUT_SHAPE)) 50 | model.add(Conv2D(64, (3, 3), activation='relu')) 51 | model.add(MaxPooling2D(pool_size=(2, 2))) 52 | model.add(Dropout(0.25)) 53 | model.add(Flatten()) 54 | model.add(Dense(128, activation='relu')) 55 | model.add(Dropout(0.5)) 56 | 57 | model.add(Dense(NUM_CLASSES)) 58 | model.add(Activation('softmax', name = 'softmax_tensor')) 59 | 60 | model.compile(loss=keras.losses.categorical_crossentropy, 61 | optimizer=keras.optimizers.Adadelta(), 62 | metrics=['accuracy']) 63 | tensorboard = TensorBoard(log_dir=MODEL_DIR) 64 | self.model = model 65 | self.tensorboard = tensorboard 66 | 67 | def train_model(self): 68 | self.model.fit(self.x_train, self.y_train, 69 | batch_size=BATCH_SIZE, 70 | epochs=EPOCHS, 71 | verbose=1, 72 | validation_data=(self.x_test, self.y_test), 73 | callbacks = [self.tensorboard]) 74 | score = self.model.evaluate(self.x_test, self.y_test, verbose=0) 75 | print('Test loss:', score[0]) 76 | print('Test accuracy:', score[1]) 77 | 78 | def main(): 79 | 80 | # Load and prepare training data 81 | model = Model() 82 | print ("Defining Model") 83 | model.define_model() 84 | print ("Training Model") 85 | model.train_model() 86 | print ("Creating Frozen Graph") 87 | sess = K.get_session() 88 | create_frozen_graph(sess, ['softmax_tensor_1/Softmax'],FREEZE_GRAPH_DIR) 89 | 90 | print ("Converting Frozen Graph To Tensorboard compatible file") 91 | pb_to_tensorboard(FREEZE_GRAPH_DIR, "freeze") 92 | 93 | print ("Optimizing the graph for inference") 94 | optimize_graph(FREEZE_GRAPH_DIR,OPTIMIZED_GRAPH_DIR) 95 | pb_to_tensorboard(OPTIMIZED_GRAPH_DIR,"optimize") 96 | 97 | 98 | if __name__ == "__main__": 99 | main() 100 | 101 | -------------------------------------------------------------------------------- /Chapter04/parameters.py: -------------------------------------------------------------------------------- 1 | LOGGING_DIR = "./logs" 2 | FREEZE_FILE_NAME = 'MNIST_model.pb' 3 | OPTIMIZE_FILE_NAME = 'MNIST_optimized.pb' 4 | 5 | BATCH_SIZE = 128 6 | EPOCHS = 12 7 | 8 | # input image dimensions 9 | IMAGE_SIZE = 28 10 | NUM_CLASSES = 10 11 | INPUT_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 1) 12 | -------------------------------------------------------------------------------- /Chapter04/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.3.0 2 | astor==0.7.1 3 | atomicwrites==1.2.1 4 | attrs==18.2.0 5 | backports-abc==0.5 6 | backports.functools-lru-cache==1.5 7 | backports.weakref==1.0.post1 8 | bleach==1.5.0 9 | certifi==2018.10.15 10 | 
cloudpickle==0.6.1 11 | configparser==3.5.0 12 | cycler==0.10.0 13 | dask==0.19.4 14 | decorator==4.3.0 15 | enum34==1.1.6 16 | funcsigs==1.0.2 17 | functools32==3.2.3.post2 18 | futures==3.2.0 19 | gast==0.2.0 20 | graphviz==0.8 21 | grpcio==1.12.1 22 | h5py==2.8.0 23 | html5lib==0.9999999 24 | imageio==2.4.1 25 | Keras==2.2.4 26 | Keras-Applications==1.0.6 27 | Keras-Preprocessing==1.0.5 28 | kiwisolver==1.0.1 29 | linecache2==1.0.0 30 | Markdown==2.6.11 31 | matplotlib==2.2.2 32 | mkl-fft==1.0.4 33 | mkl-random==1.0.1 34 | mock==2.0.0 35 | more-itertools==4.3.0 36 | multipledispatch==0.6.0 37 | mxnet==0.11.0rc1 38 | networkx==2.2 39 | numpy==1.14.5 40 | olefile==0.45.1 41 | pandas==0.23.2 42 | pathlib2==2.3.2 43 | patsy==0.5.0 44 | pbr==4.2.0 45 | Pillow==5.3.0 46 | pluggy==0.7.1 47 | protobuf==3.6.1 48 | py==1.6.0 49 | pyparsing==2.2.0 50 | pytest==3.8.1 51 | python-dateutil==2.7.3 52 | pytz==2018.5 53 | PyWavelets==1.0.1 54 | PyYAML==3.13 55 | scandir==1.9.0 56 | scikit-image==0.14.0 57 | scikit-learn==0.20.0 58 | scipy==1.1.0 59 | seaborn==0.9.0 60 | singledispatch==3.4.0.3 61 | six==1.11.0 62 | statsmodels==0.9.0 63 | subprocess32==3.5.2 64 | tensorboard==1.11.0 65 | tensorflow==1.11.0 66 | tensorflow-probability==0.4.0 67 | termcolor==1.1.0 68 | toolz==0.9.0 69 | tornado==5.1 70 | tqdm==4.23.4 71 | traceback2==1.4.0 72 | unittest2==1.1.0 73 | Werkzeug==0.14.1 74 | wget==3.2 75 | -------------------------------------------------------------------------------- /Chapter04/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from time import time 3 | import tensorflow as tf 4 | from tensorflow.python.platform import gfile 5 | from tensorflow.core.framework import graph_pb2 6 | from tensorflow.core.framework import node_def_pb2 7 | from tensorflow.python.framework.graph_util import convert_variables_to_constants 8 | from parameters import * 9 | 10 | 11 | def create_model_dir(): 12 | current_time = time() 13 | model_dir = LOGGING_DIR + "/model_files_{}".format(current_time) 14 | if not os.path.exists(model_dir): 15 | os.makedirs(model_dir) 16 | return model_dir 17 | 18 | def create_freeze_graph_dir(model_dir): 19 | freeze_graph_dir = os.path.join(model_dir, "freeze") 20 | if not os.path.exists(freeze_graph_dir): 21 | os.makedirs(freeze_graph_dir) 22 | return freeze_graph_dir 23 | 24 | def create_optimized_graph_dir(model_dir): 25 | optimized_graph_dir = os.path.join(model_dir, "optimized") 26 | if not os.path.exists(optimized_graph_dir): 27 | os.makedirs(optimized_graph_dir) 28 | return optimized_graph_dir 29 | 30 | def create_frozen_graph(sess,output_name,freeze_graph_dir): 31 | frozen_graph = freeze_session(sess, 32 | output_names=output_name) 33 | tf.train.write_graph(frozen_graph, freeze_graph_dir, FREEZE_FILE_NAME , as_text=False) 34 | 35 | def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True): 36 | """ 37 | Freezes the state of a session into a pruned computation graph. 38 | 39 | Converts the existing graph into a new graph where variable nodes are replaced by 40 | constants. New graph trims the existing graph of any operations which are not required 41 | to compute the requested output. 42 | 43 | outputs are removed. 44 | @param session The TensorFlow session to be frozen. 45 | @param keep_var_names A list of variable names that should not be frozen, 46 | or None to freeze all the variables in the graph. 47 | @param output_names Names of the relevant graph outputs. 
48 | @param clear_devices Remove the device directives from the graph for better portability. 49 | @return The frozen graph definition. 50 | """ 51 | 52 | graph = session.graph 53 | with graph.as_default(): 54 | freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or [])) 55 | output_names = output_names or [] 56 | output_names += [v.op.name for v in tf.global_variables()] 57 | input_graph_def = graph.as_graph_def() 58 | if clear_devices: 59 | for node in input_graph_def.node: 60 | node.device = "" 61 | frozen_graph = convert_variables_to_constants(session, input_graph_def, 62 | output_names, freeze_var_names) 63 | return frozen_graph 64 | 65 | def pb_to_tensorboard(input_graph_dir,graph_type ="freeze"): 66 | ''' 67 | Converts the graph ".pb" file to Tensorboard readable format 68 | :param input_graph_dir: Directory where the graph file is stored 69 | :param graph_type: "freeze" or "optimize" depending on the operation. 70 | :return: Saves the file in the folder which can be opened through Tensorboard 71 | ''' 72 | file_name = "" 73 | if graph_type == "freeze": 74 | file_name = FREEZE_FILE_NAME 75 | elif graph_type == "optimize": 76 | file_name = OPTIMIZE_FILE_NAME 77 | 78 | with tf.Session() as sess: 79 | model_filename = input_graph_dir + "/" + file_name 80 | with gfile.FastGFile(model_filename, 'rb') as f: 81 | graph_def = tf.GraphDef() 82 | graph_def.ParseFromString(f.read()) 83 | g_in = tf.import_graph_def(graph_def) 84 | train_writer = tf.summary.FileWriter(input_graph_dir) 85 | train_writer.add_graph(sess.graph) 86 | 87 | def strip(input_graph, drop_scope, input_before, output_after, pl_name): 88 | ''' 89 | This function strips the drop_scope node from the graph. 90 | :param input_graph: Input graph 91 | :param drop_scope: Scope like "Dropout" which needs to be removed from the graph 92 | :param input_before: Input before the drop_scope 93 | :param output_after: Output after the drop_scope 94 | :param pl_name: Name of pl 95 | :return: stripped output graph 96 | ''' 97 | input_nodes = input_graph.node 98 | nodes_after_strip = [] 99 | for node in input_nodes: 100 | if node.name.startswith(drop_scope + '/'): 101 | continue 102 | 103 | if node.name == pl_name: 104 | continue 105 | 106 | new_node = node_def_pb2.NodeDef() 107 | new_node.CopyFrom(node) 108 | if new_node.name == output_after: 109 | new_input = [] 110 | for node_name in new_node.input: 111 | if node_name == drop_scope + '/cond/Merge': 112 | new_input.append(input_before) 113 | else: 114 | new_input.append(node_name) 115 | del new_node.input[:] 116 | new_node.input.extend(new_input) 117 | else: 118 | new_input= [] 119 | for node_name in new_node.input: 120 | if node_name == drop_scope + '/cond/Merge': 121 | new_input.append(input_before) 122 | else: 123 | new_input.append(node_name) 124 | del new_node.input[:] 125 | new_node.input.extend(new_input) 126 | 127 | nodes_after_strip.append(new_node) 128 | 129 | output_graph = graph_pb2.GraphDef() 130 | output_graph.node.extend(nodes_after_strip) 131 | return output_graph 132 | 133 | 134 | def optimize_graph(input_dir, output_dir): 135 | ''' 136 | This is used to optimize the frozen graph by removing any unnecessary ops 137 | :param input_dir: directory where input graph is stored. 138 | :param output_dir: directory where final graph should be stored. 
139 | :return: None 140 | ''' 141 | input_graph = os.path.join(input_dir, FREEZE_FILE_NAME) 142 | output_graph = os.path.join(output_dir, OPTIMIZE_FILE_NAME) 143 | 144 | input_graph_def = tf.GraphDef() 145 | with tf.gfile.FastGFile(input_graph, "rb") as f: 146 | input_graph_def.ParseFromString(f.read()) 147 | 148 | output_graph_def = strip(input_graph_def, u'dropout_1', u'conv2d_2/bias', u'dense_1/kernel', u'training') 149 | output_graph_def = strip(output_graph_def, u'dropout_3', u'max_pooling2d_2/MaxPool', u'flatten_2/Shape', 150 | u'training') 151 | output_graph_def = strip(output_graph_def, u'dropout_4', u'dense_3/Relu', u'dense_4/kernel', u'training') 152 | output_graph_def = strip(output_graph_def, u'Adadelta_1', u'softmax_tensor_1/Softmax', 153 | u'training/Adadelta/Variable', u'training') 154 | output_graph_def = strip(output_graph_def, u'training', u'softmax_tensor_1/Softmax', 155 | u'_', u'training') 156 | 157 | with tf.gfile.GFile(output_graph, "wb") as f: 158 | f.write(output_graph_def.SerializeToString()) 159 | -------------------------------------------------------------------------------- /Chapter06/GP.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import gpflow 4 | 5 | from PreProcessing import PreProcessing 6 | 7 | 8 | class GP: 9 | preprocessed_data = None 10 | kernel = None 11 | gp_model = None 12 | 13 | def __init__(self, company): 14 | self.preprocessed_data = PreProcessing(str(company)) 15 | 16 | def make_gp_predictions(self, start_year, end_year, pred_year, pred_quarters = []): 17 | start_year, end_year, pred_year= int(start_year),int(end_year), int(pred_year) 18 | years_quarters = list(range(start_year, end_year + 1)) + ['Quarter'] 19 | years_in_train = years_quarters[:-2] 20 | price_df = self.preprocessed_data.prices_by_year[self.preprocessed_data.prices_by_year.columns.intersection(years_quarters)] 21 | 22 | num_days_in_train = list(price_df.index.values) 23 | 24 | #Generating X and Y for Training 25 | first_year_prices = price_df[start_year] 26 | if start_year == self.preprocessed_data.num_years[0]: 27 | first_year_prices = (first_year_prices[first_year_prices.iloc[:] != 0]) 28 | first_year_prices = (pd.Series([0.0], index=[first_year_prices.index[0]-1])).append(first_year_prices) 29 | 30 | first_year_days = list(first_year_prices.index.values) 31 | first_year_X = np.array([[start_year, day] for day in first_year_days]) 32 | 33 | X = first_year_X 34 | Target = np.array(first_year_prices) 35 | for year in years_in_train[1:]: 36 | current_year_prices = list(price_df.loc[:, year]) 37 | current_year_X = np.array([[year, day] for day in num_days_in_train]) 38 | X = np.append(X, current_year_X, axis=0) 39 | Target = np.append(Target, current_year_prices) 40 | 41 | final_year_prices = price_df[end_year] 42 | final_year_prices = final_year_prices[final_year_prices.iloc[:].notnull()] 43 | 44 | final_year_days = list(final_year_prices.index.values) 45 | if pred_quarters is not None: 46 | length = 63 * (pred_quarters[0] - 1) 47 | final_year_days = final_year_days[:length] 48 | final_year_prices = final_year_prices[:length] 49 | final_year_X = np.array([[end_year, day] for day in final_year_days]) 50 | 51 | X = np.append(X, final_year_X, axis=0) 52 | Target = np.append(Target, final_year_prices) 53 | 54 | if pred_quarters is not None: 55 | days_for_prediction = [day for day in 56 | range(63 * (pred_quarters[0]-1), 63 * pred_quarters[int(len(pred_quarters) != 1)])] 57 | else: 58 | 
days_for_prediction = list(range(0, self.preprocessed_data.num_days)) 59 | x_mesh = np.linspace(days_for_prediction[0], days_for_prediction[-1] 60 | , 2000) 61 | x_pred = ([[pred_year, x_mesh[i]] for i in range(len(x_mesh))]) 62 | X = X.astype(np.float64) 63 | Target = np.expand_dims(Target, axis=1) 64 | kernel = gpflow.kernels.RBF(2, lengthscales=1, variance=63) + gpflow.kernels.White(2, variance=1e-10) 65 | self.gp_model = gpflow.models.GPR(X, Target, kern=kernel) 66 | gpflow.train.ScipyOptimizer().minimize(self.gp_model) 67 | y_mean, y_var = self.gp_model.predict_y(x_pred) 68 | 69 | return x_mesh, y_mean, y_var -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/GP.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import gpflow 4 | 5 | from PreProcessing import PreProcessing 6 | 7 | 8 | class GP: 9 | preprocessed_data = None 10 | kernel = None 11 | gp_model = None 12 | 13 | def __init__(self, company): 14 | self.preprocessed_data = PreProcessing(str(company)) 15 | 16 | def make_gp_predictions(self, start_year, end_year, pred_year, pred_quarters = []): 17 | start_year, end_year, pred_year= int(start_year),int(end_year), int(pred_year) 18 | years_quarters = list(range(start_year, end_year + 1)) + ['Quarter'] 19 | years_in_train = years_quarters[:-2] 20 | price_df = self.preprocessed_data.prices_by_year[self.preprocessed_data.prices_by_year.columns.intersection(years_quarters)] 21 | 22 | num_days_in_train = list(price_df.index.values) 23 | 24 | #Generating X and Y for Training 25 | first_year_prices = price_df[start_year] 26 | if start_year == self.preprocessed_data.num_years[0]: 27 | first_year_prices = (first_year_prices[first_year_prices.iloc[:] != 0]) 28 | first_year_prices = (pd.Series([0.0], index=[first_year_prices.index[0]-1])).append(first_year_prices) 29 | 30 | first_year_days = list(first_year_prices.index.values) 31 | first_year_X = np.array([[start_year, day] for day in first_year_days]) 32 | 33 | X = first_year_X 34 | Target = np.array(first_year_prices) 35 | for year in years_in_train[1:]: 36 | current_year_prices = list(price_df.loc[:, year]) 37 | current_year_X = np.array([[year, day] for day in num_days_in_train]) 38 | X = np.append(X, current_year_X, axis=0) 39 | Target = np.append(Target, current_year_prices) 40 | 41 | final_year_prices = price_df[end_year] 42 | final_year_prices = final_year_prices[final_year_prices.iloc[:].notnull()] 43 | 44 | final_year_days = list(final_year_prices.index.values) 45 | if pred_quarters is not None: 46 | length = 63 * (pred_quarters[0] - 1) 47 | final_year_days = final_year_days[:length] 48 | final_year_prices = final_year_prices[:length] 49 | final_year_X = np.array([[end_year, day] for day in final_year_days]) 50 | 51 | X = np.append(X, final_year_X, axis=0) 52 | Target = np.append(Target, final_year_prices) 53 | 54 | if pred_quarters is not None: 55 | days_for_prediction = [day for day in 56 | range(63 * (pred_quarters[0]-1), 63 * pred_quarters[int(len(pred_quarters) != 1)])] 57 | else: 58 | days_for_prediction = list(range(0, self.preprocessed_data.num_days)) 59 | x_mesh = np.linspace(days_for_prediction[0], days_for_prediction[-1] 60 | , 2000) 61 | x_pred = ([[pred_year, x_mesh[i]] for i in range(len(x_mesh))]) 62 | X = X.astype(np.float64) 63 | Target = np.expand_dims(Target, axis=1) 64 | kernel = gpflow.kernels.RBF(2, lengthscales=1, variance=63) + gpflow.kernels.White(2, variance=1e-10) 
65 | self.gp_model = gpflow.models.GPR(X, Target, kern=kernel) 66 | gpflow.train.ScipyOptimizer().minimize(self.gp_model) 67 | y_mean, y_var = self.gp_model.predict_y(x_pred) 68 | 69 | return x_mesh, y_mean, y_var -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GE_1992_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GE_1992_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GE_2008_2016_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GE_2008_2016_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GE_2008_2016_prices_normalized.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GE_2008_2016_prices_normalized.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GE_2008_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GE_2008_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GE_2017_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GE_2017_predicted.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GE_2018_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GE_2018_predicted.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GOOG_2004_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GOOG_2004_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GOOG_2008_2016_adj_closing_prices.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GOOG_2008_2016_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GOOG_2008_2016_prices_normalized.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GOOG_2008_2016_prices_normalized.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GOOG_2008_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GOOG_2008_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GOOG_2017_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GOOG_2017_predicted.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/GOOG_2018_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/GOOG_2018_predicted.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/NFLX_2002_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/NFLX_2002_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/NFLX_2008_2016_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/NFLX_2008_2016_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/NFLX_2008_2016_prices_normalized.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/NFLX_2008_2016_prices_normalized.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/NFLX_2008_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/NFLX_2008_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/NFLX_2017_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/NFLX_2017_predicted.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/Images/NFLX_2018_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/GaussianProcess/Images/NFLX_2018_predicted.png -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/PreProcessing.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | 4 | 5 | class PreProcessing: 6 | data = None 7 | quarter_names = None 8 | num_years = None 9 | num_days = None 10 | 11 | def __init__(self, name): 12 | name= str(name) 13 | self.get_data(name) 14 | self.data['Normalized_Close'] = self.normalized_data_col(self.data) 15 | self.data['Quarter'] = self.get_quarter_col(self.data) 16 | self.num_days = 252 17 | self.prices_by_year = self.get_prices_by_year() 18 | self.quarter_length = int(self.num_days / 4) 19 | 20 | def get_prices_by_year(self): 21 | df = self.modify_first_year_data() 22 | for i in range(1, len(self.num_years)): 23 | df = pd.concat([df, pd.DataFrame(self.get_year_data(year=self.num_years[i], normalized=True))], axis=1) 24 | 25 | df = df[:self.num_days] 26 | 27 | quarter_col = [] 28 | num_days_in_quarter = self.num_days // 4 29 | for j in range(0, len(self.quarter_names)): 30 | quarter_col.extend([self.quarter_names[j]]*num_days_in_quarter) 31 | quarter_col = pd.DataFrame(quarter_col) 32 | 33 | df = pd.concat([df, quarter_col], axis=1) 34 | df.columns = self.num_years + ['Quarter'] 35 | df.index.name = 'Day' 36 | 37 | df = self.fill_nans_with_mean(df) 38 | 39 | return df 40 | 41 | def get_year_data(self, year, normalized=True): 42 | year = int(year) 43 | if year not in self.num_years: 44 | raise ValueError('\n' + 45 | 'Input year: {} not in available years: {}'.format(year, self.num_years)) 46 | 47 | prices = (self.data.loc[self.data['Date'].dt.year == year]) 48 | if normalized: 49 | return np.asarray(prices.loc[:, 'Normalized_Close']) 50 | else: 51 | return np.asarray(prices.loc[:, 'Adj Close']) 52 | 53 | def get_adj_close_prices(self, start_year, end_year): 54 | start_year,end_year = int(start_year), int(end_year) 55 | if start_year < self.num_years[0] or end_year > self.num_years[-1]: 56 | raise ValueError('\n' + 57 | 'Incorrect data! 
\n' + 58 | 'Max range available: {}-{}\n'.format(self.num_years[0], self.num_years[-1]) + 59 | 'Was: {}-{}'.format(start_year, end_year)) 60 | 61 | df = (self.data.loc[(self.data['Date'].dt.year >= start_year) & (self.data['Date'].dt.year <= end_year)]) 62 | df = df.loc[:, ['Date', 'Adj Close']] 63 | 64 | return df 65 | 66 | def get_data(self, file_name): 67 | file_name = str(file_name) 68 | self.data = pd.read_csv('Data/' + file_name + '.csv') 69 | self.data = self.data.iloc[:, [0, 5]] 70 | self.data = self.data.dropna() 71 | self.data.Date = pd.to_datetime(self.data.Date) 72 | self.quarter_names = ['Q' + str(i) for i in range(1, 5)] 73 | 74 | def normalized_data_col(self, df): 75 | price_normalized = pd.DataFrame() 76 | 77 | date_list = list(df.Date) 78 | self.num_years = sorted(list(set([date_list[i].year for i in range(0, len(date_list))]))) 79 | 80 | for i in range(0, len(self.num_years)): 81 | prices_data = self.get_year_data(year=self.num_years[i], normalized=False) 82 | prices_data = [(prices_data[i] - np.mean(prices_data)) / np.std(prices_data) for i in range(0, len(prices_data))] 83 | prices_data = [(prices_data[i] - prices_data[0]) for i in range(0, len(prices_data))] 84 | price_normalized = price_normalized.append(prices_data, ignore_index=True) 85 | 86 | return price_normalized 87 | 88 | def get_quarter_col(self, df): 89 | quarters = pd.DataFrame() 90 | 91 | for i in range(0, len(self.num_years)): 92 | dates = list((df.loc[df['Date'].dt.year == self.num_years[i]]).iloc[:, 0]) 93 | dates = pd.DataFrame([self.quarter_names[(int(dates[i].month) - 1) // 3] for i in range(0, len(dates))]) 94 | quarters = quarters.append(dates, ignore_index=True) 95 | 96 | return quarters 97 | 98 | 99 | def modify_first_year_data(self): 100 | price_data = pd.DataFrame(self.get_year_data(self.num_years[0])) 101 | df = pd.DataFrame([0 for _ in range(self.num_days - len(price_data.index))]) 102 | df = pd.concat([df, price_data], ignore_index=True) 103 | 104 | return df 105 | 106 | def fill_nans_with_mean(self, df): 107 | years = self.num_years[:-1] 108 | df_wo_last_year = df.loc[:,years] 109 | df_wo_last_year = df_wo_last_year.fillna(df_wo_last_year.mean()) 110 | df_wo_last_year[self.num_years[-1]] = df[self.num_years[-1]] 111 | df= df_wo_last_year 112 | 113 | return df 114 | -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/README.md: -------------------------------------------------------------------------------- 1 | # Gaussian Process Regression for Predicting Stock Prices 2 | 3 |

This project illustrates how to use Gaussian Process regression to predict stock prices, using the stocks of Google, Netflix, and GE as examples.

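For orientation, the snippet below is a minimal, self-contained sketch of the GPflow 1.x workflow that GP.py wraps: build an RBF + White kernel, fit a GPR model, and query the predictive mean and variance. The toy 1-D data here is illustrative only; GP.py builds two-dimensional [year, trading day] inputs from the preprocessed prices.

```python
# Minimal GPflow 1.x sketch of the calls used in GP.py (toy data, not the repo's features).
import numpy as np
import gpflow

X = np.linspace(0, 251, 100).reshape(-1, 1)             # toy inputs: trading days
Y = np.sin(X / 40.0) + 0.1 * np.random.randn(*X.shape)  # toy normalized "prices"

kernel = (gpflow.kernels.RBF(1, lengthscales=1.0, variance=1.0)
          + gpflow.kernels.White(1, variance=1e-10))
model = gpflow.models.GPR(X, Y, kern=kernel)

gpflow.train.ScipyOptimizer().minimize(model)           # fit the kernel hyperparameters

X_new = np.linspace(0, 251, 500).reshape(-1, 1)
y_mean, y_var = model.predict_y(X_new)                  # predictive mean and variance
```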
4 | 5 | 6 | ### Installations 7 | * This code is checked on using native Python 3 with anaconda 8 | * Create a conda virtual environment and install packages using requirements.txt 9 | * We use plug and play functions from [GpFlow](https://github.com/GPflow/GPflow) library, which is a wrapper on top of Tensorflow for Gaussian Processes. Please install that library as mentioned in the README of that repo. 10 | 11 | 12 | 13 | 14 | ### Python Code Run Instructions 15 | To run the code just execute 16 | ``` 17 | python main.py 18 | ``` 19 | 20 | #### Dataset 21 | The dataset was downloaded from [Yahoo Finance](https://finance.yahoo.com). We downloaded the entire stock history for three companies: 22 | * [Google] (https://finance.yahoo.com/quote/GOOG) 23 | * [Netflix] (https://finance.yahoo.com/quote/NFLX) 24 | * [General Electric Company] (https://finance.yahoo.com/quote/GE) 25 | 26 | 27 | ### Code Details 28 | Code is pretty self explanatory. There are mainly four files in implementation: 29 | 30 | * main.py : Main function which runs the entire code 31 | * PreProcessing.py : Preprocesses the stock data to make it ready for modeling 32 | * VisualizeData.py : Contains the functions to visualize the dataset 33 | * GP.py : Contains the implementation of training and inference through Gaussian Process using GpFlow library 34 | 35 | 36 | -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from VisualizeData import * 3 | 4 | 5 | start_year = 2008 6 | 7 | def main(): 8 | company_list = [] 9 | plot_objects = {} 10 | for company_file in os.listdir('Data'): 11 | if company_file == '.DS_Store': 12 | continue 13 | company = company_file.split('.')[0] 14 | company_list.append(company) 15 | plot_objects[company] = PlotData(company=company) 16 | 17 | for company in company_list: 18 | print ("Company Name ", company) 19 | do_work(company,plot_objects[company]) 20 | print("Done!") 21 | 22 | 23 | def do_work(company,plot_data): 24 | plot_data.plot_complete_history() 25 | plot_data.plot_prices_data(start_year=start_year, end_year=2016) 26 | plot_data.plot_normalized_prices(first_year=start_year, last_year=2016) 27 | plot_data.plot_gp_predictions(train_start=start_year, train_end=2016, pred_year=2017) 28 | plot_data.plot_prices_data(start_year=start_year, end_year=2018) 29 | plot_data.plot_gp_predictions(train_start=start_year, train_end=2018, pred_year=2018, pred_quarters=[4]) 30 | print(company + ' summary done!') 31 | 32 | if __name__ == "__main__": 33 | main() 34 | -------------------------------------------------------------------------------- /Chapter06/GaussianProcess/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.3.0 2 | astor==0.7.1 3 | atomicwrites==1.2.1 4 | attrs==18.2.0 5 | backports-abc==0.5 6 | backports.functools-lru-cache==1.5 7 | backports.weakref==1.0.post1 8 | bleach==1.5.0 9 | certifi==2018.10.15 10 | cloudpickle==0.6.1 11 | configparser==3.5.0 12 | cycler==0.10.0 13 | dask==0.19.4 14 | decorator==4.3.0 15 | enum34==1.1.6 16 | funcsigs==1.0.2 17 | functools32==3.2.3.post2 18 | futures==3.2.0 19 | gast==0.2.0 20 | gpflow==1.2.0 21 | graphviz==0.8 22 | grpcio==1.12.1 23 | h5py==2.8.0 24 | html5lib==0.9999999 25 | imageio==2.4.1 26 | Keras==2.2.4 27 | Keras-Applications==1.0.6 28 | Keras-Preprocessing==1.0.5 29 | kiwisolver==1.0.1 30 | linecache2==1.0.0 31 | 
Markdown==2.6.11 32 | matplotlib==2.2.2 33 | mkl-fft==1.0.4 34 | mkl-random==1.0.1 35 | mock==2.0.0 36 | more-itertools==4.3.0 37 | multipledispatch==0.6.0 38 | mxnet==0.11.0rc1 39 | networkx==2.2 40 | numpy==1.14.5 41 | olefile==0.45.1 42 | pandas==0.23.2 43 | pathlib2==2.3.2 44 | patsy==0.5.0 45 | pbr==4.2.0 46 | Pillow==5.3.0 47 | pluggy==0.7.1 48 | protobuf==3.6.1 49 | py==1.6.0 50 | pyparsing==2.2.0 51 | pytest==3.8.1 52 | python-dateutil==2.7.3 53 | pytz==2018.5 54 | PyWavelets==1.0.1 55 | PyYAML==3.13 56 | scandir==1.9.0 57 | scikit-image==0.14.0 58 | scikit-learn==0.20.0 59 | scipy==1.1.0 60 | seaborn==0.9.0 61 | singledispatch==3.4.0.3 62 | six==1.11.0 63 | statsmodels==0.9.0 64 | subprocess32==3.5.2 65 | tensorboard==1.11.0 66 | tensorflow==1.11.0 67 | tensorflow-probability==0.4.0 68 | termcolor==1.1.0 69 | toolz==0.9.0 70 | tornado==5.1 71 | tqdm==4.23.4 72 | traceback2==1.4.0 73 | unittest2==1.1.0 74 | Werkzeug==0.14.1 75 | wget==3.2 76 | -------------------------------------------------------------------------------- /Chapter06/Images/GE_1992_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GE_1992_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/Images/GE_2008_2016_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GE_2008_2016_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/Images/GE_2008_2016_prices_normalized.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GE_2008_2016_prices_normalized.png -------------------------------------------------------------------------------- /Chapter06/Images/GE_2008_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GE_2008_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/Images/GE_2017_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GE_2017_predicted.png -------------------------------------------------------------------------------- /Chapter06/Images/GE_2018_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GE_2018_predicted.png -------------------------------------------------------------------------------- /Chapter06/Images/GOOG_2004_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GOOG_2004_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/Images/GOOG_2008_2016_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GOOG_2008_2016_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/Images/GOOG_2008_2016_prices_normalized.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GOOG_2008_2016_prices_normalized.png -------------------------------------------------------------------------------- /Chapter06/Images/GOOG_2008_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GOOG_2008_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/Images/GOOG_2017_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GOOG_2017_predicted.png -------------------------------------------------------------------------------- /Chapter06/Images/GOOG_2018_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/GOOG_2018_predicted.png -------------------------------------------------------------------------------- /Chapter06/Images/NFLX_2002_2018_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/NFLX_2002_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/Images/NFLX_2008_2016_adj_closing_prices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/NFLX_2008_2016_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/Images/NFLX_2008_2016_prices_normalized.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/NFLX_2008_2016_prices_normalized.png -------------------------------------------------------------------------------- /Chapter06/Images/NFLX_2008_2018_adj_closing_prices.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/NFLX_2008_2018_adj_closing_prices.png -------------------------------------------------------------------------------- /Chapter06/Images/NFLX_2017_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/NFLX_2017_predicted.png -------------------------------------------------------------------------------- /Chapter06/Images/NFLX_2018_predicted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter06/Images/NFLX_2018_predicted.png -------------------------------------------------------------------------------- /Chapter06/PreProcessing.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | 4 | 5 | class PreProcessing: 6 | data = None 7 | quarter_names = None 8 | num_years = None 9 | num_days = None 10 | 11 | def __init__(self, name): 12 | name= str(name) 13 | self.get_data(name) 14 | self.data['Normalized_Close'] = self.normalized_data_col(self.data) 15 | self.data['Quarter'] = self.get_quarter_col(self.data) 16 | self.num_days = 252 17 | self.prices_by_year = self.get_prices_by_year() 18 | self.quarter_length = int(self.num_days / 4) 19 | 20 | def get_prices_by_year(self): 21 | df = self.modify_first_year_data() 22 | for i in range(1, len(self.num_years)): 23 | df = pd.concat([df, pd.DataFrame(self.get_year_data(year=self.num_years[i], normalized=True))], axis=1) 24 | 25 | df = df[:self.num_days] 26 | 27 | quarter_col = [] 28 | num_days_in_quarter = self.num_days // 4 29 | for j in range(0, len(self.quarter_names)): 30 | quarter_col.extend([self.quarter_names[j]]*num_days_in_quarter) 31 | quarter_col = pd.DataFrame(quarter_col) 32 | 33 | df = pd.concat([df, quarter_col], axis=1) 34 | df.columns = self.num_years + ['Quarter'] 35 | df.index.name = 'Day' 36 | 37 | df = self.fill_nans_with_mean(df) 38 | 39 | return df 40 | 41 | def get_year_data(self, year, normalized=True): 42 | year = int(year) 43 | if year not in self.num_years: 44 | raise ValueError('\n' + 45 | 'Input year: {} not in available years: {}'.format(year, self.num_years)) 46 | 47 | prices = (self.data.loc[self.data['Date'].dt.year == year]) 48 | if normalized: 49 | return np.asarray(prices.loc[:, 'Normalized_Close']) 50 | else: 51 | return np.asarray(prices.loc[:, 'Adj Close']) 52 | 53 | def get_adj_close_prices(self, start_year, end_year): 54 | start_year,end_year = int(start_year), int(end_year) 55 | if start_year < self.num_years[0] or end_year > self.num_years[-1]: 56 | raise ValueError('\n' + 57 | 'Incorrect data! 
\n' + 58 | 'Max range available: {}-{}\n'.format(self.num_years[0], self.num_years[-1]) + 59 | 'Was: {}-{}'.format(start_year, end_year)) 60 | 61 | df = (self.data.loc[(self.data['Date'].dt.year >= start_year) & (self.data['Date'].dt.year <= end_year)]) 62 | df = df.loc[:, ['Date', 'Adj Close']] 63 | 64 | return df 65 | 66 | def get_data(self, file_name): 67 | file_name = str(file_name) 68 | self.data = pd.read_csv('Data/' + file_name + '.csv') 69 | self.data = self.data.iloc[:, [0, 5]] 70 | self.data = self.data.dropna() 71 | self.data.Date = pd.to_datetime(self.data.Date) 72 | self.quarter_names = ['Q' + str(i) for i in range(1, 5)] 73 | 74 | def normalized_data_col(self, df): 75 | price_normalized = pd.DataFrame() 76 | 77 | date_list = list(df.Date) 78 | self.num_years = sorted(list(set([date_list[i].year for i in range(0, len(date_list))]))) 79 | 80 | for i in range(0, len(self.num_years)): 81 | prices_data = self.get_year_data(year=self.num_years[i], normalized=False) 82 | prices_data = [(prices_data[i] - np.mean(prices_data)) / np.std(prices_data) for i in range(0, len(prices_data))] 83 | prices_data = [(prices_data[i] - prices_data[0]) for i in range(0, len(prices_data))] 84 | price_normalized = price_normalized.append(prices_data, ignore_index=True) 85 | 86 | return price_normalized 87 | 88 | def get_quarter_col(self, df): 89 | quarters = pd.DataFrame() 90 | 91 | for i in range(0, len(self.num_years)): 92 | dates = list((df.loc[df['Date'].dt.year == self.num_years[i]]).iloc[:, 0]) 93 | dates = pd.DataFrame([self.quarter_names[(int(dates[i].month) - 1) // 3] for i in range(0, len(dates))]) 94 | quarters = quarters.append(dates, ignore_index=True) 95 | 96 | return quarters 97 | 98 | 99 | def modify_first_year_data(self): 100 | price_data = pd.DataFrame(self.get_year_data(self.num_years[0])) 101 | df = pd.DataFrame([0 for _ in range(self.num_days - len(price_data.index))]) 102 | df = pd.concat([df, price_data], ignore_index=True) 103 | 104 | return df 105 | 106 | def fill_nans_with_mean(self, df): 107 | years = self.num_years[:-1] 108 | df_wo_last_year = df.loc[:,years] 109 | df_wo_last_year = df_wo_last_year.fillna(df_wo_last_year.mean()) 110 | df_wo_last_year[self.num_years[-1]] = df[self.num_years[-1]] 111 | df= df_wo_last_year 112 | 113 | return df 114 | -------------------------------------------------------------------------------- /Chapter06/README.md: -------------------------------------------------------------------------------- 1 | # Gaussian Process Regression for Predicting Stock Prices 2 | 3 |

This project illustrates how to use Gaussian Process regression to predict stock prices, using the stocks of Google, Netflix, and GE as examples.

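For a quick sense of how the pieces fit together, the snippet below is a hypothetical direct use of the GP class from GP.py (main.py normally drives it through PlotData); it assumes Data/GOOG.csv is present in the working directory.

```python
# Hypothetical usage sketch (not part of the repo): train on the 2008-2018 prices
# and predict the fourth quarter of 2018 directly through the GP class.
from GP import GP

gp = GP('GOOG')   # loads and preprocesses Data/GOOG.csv
x_mesh, y_mean, y_var = gp.make_gp_predictions(
    start_year=2008, end_year=2018, pred_year=2018, pred_quarters=[4])
print(y_mean[:5], y_var[:5])
```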
4 | 5 | 6 | ### Installations 7 | * This code is checked on using native Python 3 with anaconda 8 | * Create a conda virtual environment and install packages using requirements.txt 9 | * We use plug and play functions from [GpFlow](https://github.com/GPflow/GPflow) library, which is a wrapper on top of Tensorflow for Gaussian Processes. Please install that library as mentioned in the README of that repo. 10 | 11 | 12 | 13 | 14 | ### Python Code Run Instructions 15 | To run the code just execute 16 | ``` 17 | python main.py 18 | ``` 19 | 20 | #### Dataset 21 | The dataset was downloaded from [Yahoo Finance](https://finance.yahoo.com). We downloaded the entire stock history for three companies: 22 | * [Google] (https://finance.yahoo.com/quote/GOOG) 23 | * [Netflix] (https://finance.yahoo.com/quote/NFLX) 24 | * [General Electric Company] (https://finance.yahoo.com/quote/GE) 25 | 26 | 27 | ### Code Details 28 | Code is pretty self explanatory. There are mainly four files in implementation: 29 | 30 | * main.py : Main function which runs the entire code 31 | * PreProcessing.py : Preprocesses the stock data to make it ready for modeling 32 | * VisualizeData.py : Contains the functions to visualize the dataset 33 | * GP.py : Contains the implementation of training and inference through Gaussian Process using GpFlow library 34 | 35 | 36 | -------------------------------------------------------------------------------- /Chapter06/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from VisualizeData import * 3 | 4 | 5 | start_year = 2008 6 | 7 | def main(): 8 | company_list = [] 9 | plot_objects = {} 10 | for company_file in os.listdir('Data'): 11 | if company_file == '.DS_Store': 12 | continue 13 | company = company_file.split('.')[0] 14 | company_list.append(company) 15 | plot_objects[company] = PlotData(company=company) 16 | 17 | for company in company_list: 18 | print ("Company Name ", company) 19 | do_work(company,plot_objects[company]) 20 | print("Done!") 21 | 22 | 23 | def do_work(company,plot_data): 24 | plot_data.plot_complete_history() 25 | plot_data.plot_prices_data(start_year=start_year, end_year=2016) 26 | plot_data.plot_normalized_prices(first_year=start_year, last_year=2016) 27 | plot_data.plot_gp_predictions(train_start=start_year, train_end=2016, pred_year=2017) 28 | plot_data.plot_prices_data(start_year=start_year, end_year=2018) 29 | plot_data.plot_gp_predictions(train_start=start_year, train_end=2018, pred_year=2018, pred_quarters=[4]) 30 | print(company + ' summary done!') 31 | 32 | if __name__ == "__main__": 33 | main() 34 | -------------------------------------------------------------------------------- /Chapter06/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.3.0 2 | astor==0.7.1 3 | atomicwrites==1.2.1 4 | attrs==18.2.0 5 | backports-abc==0.5 6 | backports.functools-lru-cache==1.5 7 | backports.weakref==1.0.post1 8 | bleach==1.5.0 9 | certifi==2018.10.15 10 | cloudpickle==0.6.1 11 | configparser==3.5.0 12 | cycler==0.10.0 13 | dask==0.19.4 14 | decorator==4.3.0 15 | enum34==1.1.6 16 | funcsigs==1.0.2 17 | functools32==3.2.3.post2 18 | futures==3.2.0 19 | gast==0.2.0 20 | gpflow==1.2.0 21 | graphviz==0.8 22 | grpcio==1.12.1 23 | h5py==2.8.0 24 | html5lib==0.9999999 25 | imageio==2.4.1 26 | Keras==2.2.4 27 | Keras-Applications==1.0.6 28 | Keras-Preprocessing==1.0.5 29 | kiwisolver==1.0.1 30 | linecache2==1.0.0 31 | Markdown==2.6.11 32 | matplotlib==2.2.2 
33 | mkl-fft==1.0.4 34 | mkl-random==1.0.1 35 | mock==2.0.0 36 | more-itertools==4.3.0 37 | multipledispatch==0.6.0 38 | mxnet==0.11.0rc1 39 | networkx==2.2 40 | numpy==1.14.5 41 | olefile==0.45.1 42 | pandas==0.23.2 43 | pathlib2==2.3.2 44 | patsy==0.5.0 45 | pbr==4.2.0 46 | Pillow==5.3.0 47 | pluggy==0.7.1 48 | protobuf==3.6.1 49 | py==1.6.0 50 | pyparsing==2.2.0 51 | pytest==3.8.1 52 | python-dateutil==2.7.3 53 | pytz==2018.5 54 | PyWavelets==1.0.1 55 | PyYAML==3.13 56 | scandir==1.9.0 57 | scikit-image==0.14.0 58 | scikit-learn==0.20.0 59 | scipy==1.1.0 60 | seaborn==0.9.0 61 | singledispatch==3.4.0.3 62 | six==1.11.0 63 | statsmodels==0.9.0 64 | subprocess32==3.5.2 65 | tensorboard==1.11.0 66 | tensorflow==1.11.0 67 | tensorflow-probability==0.4.0 68 | termcolor==1.1.0 69 | toolz==0.9.0 70 | tornado==5.1 71 | tqdm==4.23.4 72 | traceback2==1.4.0 73 | unittest2==1.1.0 74 | Werkzeug==0.14.1 75 | wget==3.2 76 | -------------------------------------------------------------------------------- /Chapter07/README.md: -------------------------------------------------------------------------------- 1 | # Credit Card Fraud Detection Using Autoencoders 2 | 3 |

This project illustrates how to use autoencoders to detect fraudulent transactions in credit card transaction data from Kaggle.
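As a quick orientation before reading model.py, the sketch below shows the general idea in Keras: train an autoencoder to reconstruct normal transactions, then flag inputs with a high reconstruction error. The feature count, layer sizes, and the random placeholder data are assumptions for illustration only, not the exact architecture or data used in this chapter.

```
# Illustrative sketch only -- see model.py for this chapter's actual architecture.
import numpy as np
from keras.models import Model
from keras.layers import Input, Dense

n_features = 29        # assumption: feature count after dropping the 'Time' column
encoding_dim = 14      # assumption: mirrors DIM_ENCODER in parameters.py

inputs = Input(shape=(n_features,))
encoded = Dense(encoding_dim, activation='relu')(inputs)     # compress
decoded = Dense(n_features, activation='linear')(encoded)    # reconstruct

autoencoder = Model(inputs, decoded)
autoencoder.compile(optimizer='adam', loss='mean_squared_error')

# Train on normal (non-fraud) transactions only; random placeholder data here.
X_normal = np.random.rand(1024, n_features).astype('float32')
autoencoder.fit(X_normal, X_normal, epochs=5, batch_size=32, verbose=0)

# Score new transactions by reconstruction error; large errors suggest fraud.
X_new = np.random.rand(8, n_features).astype('float32')
errors = np.mean(np.square(autoencoder.predict(X_new) - X_new), axis=1)
print(errors)
```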

4 | 5 | 6 | ### Installations 7 | * This code is checked on using native Python 3 with anaconda 8 | * Create a conda virtual environment and install the requirements using requirements.txt in the repo. 9 | * We use Keras with Tensorflow backend for this project 10 | 11 | 12 | #### Dataset 13 | The dataset was downloaded from [Kaggle](https://www.kaggle.com/mlg-ulb/creditcardfraud). PLEASE download the data to "./data" folder as a ZIP file from the website before executing the code. Otherwise the code will run into errors. 14 | 15 | 16 | ### Python Code Run Instructions 17 | Make sure you have downloaded the dataset as per the instructions above. Then, To run the code just execute 18 | ``` 19 | python main.py 20 | ``` 21 | 22 | 23 | ### Code Details 24 | Code is pretty self explanatory. There are mainly four files in implementation: 25 | 26 | * main.py : Main function which runs the entire code 27 | * model.py : Contains the model class which defines and trains the model 28 | * utils.py : Contains general utility functions to load data and generate relevant plots 29 | * parameters.py : Defines the static parameters used for building the model and storing the results 30 | 31 | 32 | -------------------------------------------------------------------------------- /Chapter07/data/Download_Data_Zip_File_Here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter07/data/Download_Data_Zip_File_Here.txt -------------------------------------------------------------------------------- /Chapter07/main.py: -------------------------------------------------------------------------------- 1 | 2 | from utils import * 3 | from model import * 4 | 5 | 6 | def main(): 7 | # Loading and processing data 8 | print ("Loading the data") 9 | processed_data = load_and_preprocess_data() 10 | print ("Getting train and test dataset") 11 | X_train, X_test, y_test = get_train_and_test_data(processed_data) 12 | 13 | model_obj = MODEL(X_train,X_test,y_test) 14 | print ("Training the model") 15 | model_obj.train_model() 16 | print ("Loading the trained model") 17 | model_obj.get_trained_model() 18 | print ("Get Reconstruction Loss By Class") 19 | model_obj.plot_reconstruction_error_by_class() 20 | print ("Getting Precision Recall Curves by Thresholds") 21 | model_obj.get_precision_recall_curves() 22 | print ("Get confusion matrix with 80% recall on Test Dataset") 23 | model_obj.get_confusion_matrix(min_recall = 0.8) 24 | 25 | 26 | if __name__==main(): 27 | main() 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /Chapter07/parameters.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file defines the parameters for the project 3 | ''' 4 | 5 | ## Directories 6 | 7 | DATA_DIR = "./data" 8 | PLOTS_DIR = "./plots" 9 | MODEL_SAVE_DIR = "./saved_models" 10 | LOG_DIR = "./logs" 11 | 12 | 13 | ## Training Parameters 14 | RANDOM_SEED = 0 15 | DIM_ENCODER = 14 16 | EPOCHS = 100 17 | BATCH_SIZE = 32 18 | OPTIMIZER = 'adam' 19 | LOSS = 'mean_squared_error' 20 | EVAL_METRIC = 'accuracy' 21 | -------------------------------------------------------------------------------- /Chapter07/plots/.DS_Store: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter07/plots/.DS_Store -------------------------------------------------------------------------------- /Chapter07/plots/Confusion_Matrix_with_threshold_3.89949698999.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter07/plots/Confusion_Matrix_with_threshold_3.89949698999.png -------------------------------------------------------------------------------- /Chapter07/plots/Loss_Curves.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter07/plots/Loss_Curves.png -------------------------------------------------------------------------------- /Chapter07/plots/Precision_Threshold_Curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter07/plots/Precision_Threshold_Curve.png -------------------------------------------------------------------------------- /Chapter07/plots/Recall_Threshold_Curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter07/plots/Recall_Threshold_Curve.png -------------------------------------------------------------------------------- /Chapter07/plots/Recon_Error_with_Fraud_Transactions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter07/plots/Recon_Error_with_Fraud_Transactions.png -------------------------------------------------------------------------------- /Chapter07/plots/Recon_Error_with_Normal_Transactions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter07/plots/Recon_Error_with_Normal_Transactions.png -------------------------------------------------------------------------------- /Chapter07/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.3.0 2 | astor==0.7.1 3 | atomicwrites==1.2.1 4 | attrs==18.2.0 5 | backports-abc==0.5 6 | backports.functools-lru-cache==1.5 7 | backports.weakref==1.0.post1 8 | bleach==1.5.0 9 | certifi==2018.8.24 10 | configparser==3.5.0 11 | cycler==0.10.0 12 | enum34==1.1.6 13 | funcsigs==1.0.2 14 | functools32==3.2.3.post2 15 | futures==3.2.0 16 | gast==0.2.0 17 | graphviz==0.8 18 | grpcio==1.12.1 19 | h5py==2.8.0 20 | html5lib==0.9999999 21 | Keras==2.2.4 22 | Keras-Applications==1.0.6 23 | Keras-Preprocessing==1.0.5 24 | kiwisolver==1.0.1 25 | linecache2==1.0.0 26 | Markdown==2.6.11 27 | matplotlib==2.2.2 28 | mkl-fft==1.0.4 29 | mkl-random==1.0.1 30 | mock==2.0.0 31 | more-itertools==4.3.0 32 | multipledispatch==0.6.0 33 | mxnet==0.11.0rc1 34 | numpy==1.14.5 35 | olefile==0.45.1 36 | pandas==0.23.2 37 | pathlib2==2.3.2 38 | patsy==0.5.0 
39 | pbr==4.2.0 40 | Pillow==5.2.0 41 | pluggy==0.7.1 42 | protobuf==3.5.2 43 | py==1.6.0 44 | pyparsing==2.2.0 45 | pyro-ppl==0.2.0+9c3d68e 46 | pytest==3.8.1 47 | python-dateutil==2.7.3 48 | pytz==2018.5 49 | PyYAML==3.13 50 | scandir==1.9.0 51 | scikit-learn==0.20.0 52 | scipy==1.1.0 53 | seaborn==0.9.0 54 | singledispatch==3.4.0.3 55 | six==1.11.0 56 | statsmodels==0.9.0 57 | subprocess32==3.5.2 58 | tensorboard==1.9.0 59 | tensorflow==1.9.0 60 | termcolor==1.1.0 61 | tornado==5.1 62 | tqdm==4.23.4 63 | traceback2==1.4.0 64 | unittest2==1.1.0 65 | Werkzeug==0.14.1 66 | wget==3.2 67 | -------------------------------------------------------------------------------- /Chapter07/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | from sklearn.preprocessing import StandardScaler 4 | from sklearn.model_selection import train_test_split 5 | import zipfile 6 | 7 | 8 | from parameters import * 9 | 10 | def read_data(): 11 | zf = zipfile.ZipFile(os.path.join(DATA_DIR,"creditcardfraud.zip")) 12 | data = pd.read_csv(zf.open("creditcard.csv")) 13 | return data 14 | 15 | 16 | def preprocess_data(data): 17 | data = data.drop(['Time'], axis=1) 18 | data['Amount'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1)) 19 | return data 20 | 21 | 22 | def load_and_preprocess_data(): 23 | data = read_data() 24 | processed_data = preprocess_data(data) 25 | return processed_data 26 | 27 | def get_train_and_test_data(processed_data): 28 | X_train, X_test = train_test_split(processed_data, test_size=0.25, random_state=RANDOM_SEED) 29 | X_train = X_train[X_train.Class == 0] 30 | X_train = X_train.drop(['Class'], axis=1) 31 | 32 | y_test = X_test['Class'] 33 | X_test = X_test.drop(['Class'], axis=1) 34 | 35 | X_train = X_train.values 36 | X_test = X_test.values 37 | return X_train, X_test,y_test 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /Chapter08/Code/README.md: -------------------------------------------------------------------------------- 1 | # Traffic Sign Classification Using Bayesian Neural Networks 2 | This repo consists of an implementation of a simple Bayesian Neural Networks using German Traffic Sign Dataset. 3 | 4 | ### Installations 5 | * This code is checked on using native Python 3 with anaconda 6 | * Create a conda virtual environment and install relevant packages using requirements.txt file 7 | ``` 8 | pip install requirements.txt 9 | ``` 10 | For installing Tensorflow Probability, use the following command: 11 | ``` 12 | pip install --upgrade tensorflow-probability 13 | 14 | ``` 15 | 16 | ### Python Code Run Instructions 17 | To run the code just execute 18 | ``` 19 | python bnn.py 20 | ``` 21 | 22 | As the dataset is not large, execution should be fairly fast 23 | #### Dataset 24 | We use the German Traffic Sign Dataset. You can download the dataset form [here](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset). On this website head over to Downloads section and then download "Images and Annotations" data from Training and Testing dataset subsections. 25 | Also download,"Extended annotations including class ids" from "Test Dataset" section in the website. 26 | Place the zip files under a folder named "Data" for code to execute. 27 | 28 | ### Code Details 29 | Code is pretty self explanatory. 
There are mainly four files in implementation: 30 | * bnn.py -- Implements the main function and neural network model. It also contains training and inference parts of the model. 31 | * parameters.py -- Defines the parameters used for modeling 32 | * utils.py -- Implements utility functions to run the code. 33 | 34 | Note that the model was not tuned for best hyperparameters. Feel free to play around. 35 | -------------------------------------------------------------------------------- /Chapter08/Code/parameters.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | #DATA_DIR = '/Users/ankit.jain/Documents/Teaching&Learning/Packt/Book/BNN/Data' 4 | DATA_DIR = os.path.join(os.getcwd(),"..","Data") 5 | NUM_CLASSES = 43 6 | IMG_SIZE = 32 7 | 8 | #Training Parameters 9 | BATCH_SIZE =128 10 | EPOCHS =1000 11 | LEARNING_RATE = 0.001 12 | 13 | # Inference Parameters 14 | NUM_MONTE_CARLO = 50 15 | -------------------------------------------------------------------------------- /Chapter08/Code/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.3.0 2 | astor==0.7.1 3 | atomicwrites==1.2.1 4 | attrs==18.2.0 5 | backports-abc==0.5 6 | backports.functools-lru-cache==1.5 7 | backports.weakref==1.0.post1 8 | bleach==1.5.0 9 | certifi==2018.10.15 10 | cloudpickle==0.6.1 11 | configparser==3.5.0 12 | cycler==0.10.0 13 | dask==0.19.4 14 | decorator==4.3.0 15 | enum34==1.1.6 16 | funcsigs==1.0.2 17 | functools32==3.2.3.post2 18 | futures==3.2.0 19 | gast==0.2.0 20 | graphviz==0.8 21 | grpcio==1.12.1 22 | h5py==2.8.0 23 | html5lib==0.9999999 24 | imageio==2.4.1 25 | Keras==2.2.4 26 | Keras-Applications==1.0.6 27 | Keras-Preprocessing==1.0.5 28 | kiwisolver==1.0.1 29 | linecache2==1.0.0 30 | Markdown==2.6.11 31 | matplotlib==2.2.2 32 | mkl-fft==1.0.4 33 | mkl-random==1.0.1 34 | mock==2.0.0 35 | more-itertools==4.3.0 36 | multipledispatch==0.6.0 37 | mxnet==0.11.0rc1 38 | networkx==2.2 39 | numpy==1.14.5 40 | olefile==0.45.1 41 | pandas==0.23.2 42 | pathlib2==2.3.2 43 | patsy==0.5.0 44 | pbr==4.2.0 45 | Pillow==5.2.0 46 | pluggy==0.7.1 47 | protobuf==3.6.1 48 | py==1.6.0 49 | pyparsing==2.2.0 50 | pyro-ppl==0.2.0+9c3d68e 51 | pytest==3.8.1 52 | python-dateutil==2.7.3 53 | pytz==2018.5 54 | PyWavelets==1.0.1 55 | PyYAML==3.13 56 | scandir==1.9.0 57 | scikit-image==0.14.0 58 | scikit-learn==0.20.0 59 | scipy==1.1.0 60 | seaborn==0.9.0 61 | singledispatch==3.4.0.3 62 | six==1.11.0 63 | statsmodels==0.9.0 64 | subprocess32==3.5.2 65 | tensorboard==1.11.0 66 | tensorflow==1.11.0 67 | tensorflow-probability==0.4.0 68 | termcolor==1.1.0 69 | toolz==0.9.0 70 | tornado==5.1 71 | tqdm==4.23.4 72 | traceback2==1.4.0 73 | unittest2==1.1.0 74 | Werkzeug==0.14.1 75 | wget==3.2 76 | -------------------------------------------------------------------------------- /Chapter08/Data/Download_Data_Here.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter08/Data/Download_Data_Here.txt -------------------------------------------------------------------------------- /Chapter08/README.md: -------------------------------------------------------------------------------- 1 | Link to download the dataset: http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset 2 | -------------------------------------------------------------------------------- 
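The Bayesian neural network described in the Chapter08 README above comes down to replacing point-estimate weights with weight distributions and training against an ELBO-style loss. The sketch below shows that idea with TensorFlow Probability's flipout layers; the layer sizes, placeholder shapes, and assumed training-set size are illustrative only, and the repository's actual model lives in Chapter08/Code/bnn.py.

```
# Illustrative sketch of a Bayesian classifier with TF Probability flipout layers.
# Shapes and sizes are assumptions; see Chapter08/Code/bnn.py for the real model.
import tensorflow as tf
import tensorflow_probability as tfp

images = tf.placeholder(tf.float32, shape=[None, 32 * 32 * 3])   # flattened 32x32 RGB images
labels = tf.placeholder(tf.int32, shape=[None])

model = tf.keras.Sequential([
    tfp.layers.DenseFlipout(128, activation=tf.nn.relu),  # distributions over weights
    tfp.layers.DenseFlipout(43),                          # 43 classes, as in parameters.py
])
logits = model(images)

# ELBO-style loss: data fit (cross-entropy) plus KL divergence of the weight posterior.
neg_log_likelihood = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
kl = sum(model.losses) / 39000.0          # assumption: roughly 39k training images
elbo_loss = neg_log_likelihood + kl
train_op = tf.train.AdamOptimizer(0.001).minimize(elbo_loss)
```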
/Chapter09/DiscoGAN.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from parameters import * 3 | from utils import generator, discriminator 4 | import os 5 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 6 | 7 | 8 | 9 | class DiscoGAN: 10 | def __init__(self): 11 | with tf.variable_scope('Input'): 12 | self.X_bags = tf.placeholder(shape = [None, 64, 64, 3], name='bags', dtype=tf.float32) 13 | self.X_shoes = tf.placeholder(shape= [None, 64, 64, 3], name='shoes',dtype= tf.float32) 14 | self.initializer = tf.truncated_normal_initializer(stddev=0.02) 15 | self.define_network() 16 | self.define_loss() 17 | self.get_trainable_params() 18 | self.define_optimizer() 19 | self.summary_() 20 | 21 | def define_network(self): 22 | 23 | # Generators 24 | # This one is used to generate fake data 25 | self.gen_b_fake = generator(self.X_shoes, self.initializer,scope_name="generator_sb") 26 | self.gen_s_fake = generator(self.X_bags, self.initializer,scope_name="generator_bs") 27 | 28 | # Reconstruction Generators 29 | # Note that parameters are being used from previous layers 30 | self.gen_recon_s = generator(self.gen_b_fake, self.initializer,scope_name="generator_sb", reuse=True) 31 | self.gen_recon_b = generator(self.gen_s_fake, self.initializer, scope_name="generator_bs", reuse=True) 32 | 33 | # Discriminator for Shoes 34 | self.disc_s_real = discriminator(self.X_shoes,self.initializer, scope_name="discriminator_s") 35 | self.disc_s_fake = discriminator(self.gen_s_fake,self.initializer, scope_name="discriminator_s", reuse=True) 36 | 37 | # Discriminator for Bags 38 | self.disc_b_real = discriminator(self.X_bags,self.initializer,scope_name="discriminator_b") 39 | self.disc_b_fake = discriminator(self.gen_b_fake, self.initializer, reuse=True,scope_name="discriminator_b") 40 | 41 | # Defining Discriminators of Bags and Shoes 42 | 43 | def define_loss(self): 44 | # Reconstruction loss for generators 45 | self.const_loss_s = tf.reduce_mean(tf.losses.mean_squared_error(self.gen_recon_s, self.X_shoes)) 46 | self.const_loss_b = tf.reduce_mean(tf.losses.mean_squared_error(self.gen_recon_b, self.X_bags)) 47 | 48 | # Generator loss for GANs 49 | self.gen_s_loss = tf.reduce_mean( 50 | tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_s_fake, labels=tf.ones_like(self.disc_s_fake))) 51 | self.gen_b_loss = tf.reduce_mean( 52 | tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_b_fake, labels=tf.ones_like(self.disc_b_fake))) 53 | 54 | # Total Generator Loss 55 | self.gen_loss = (self.const_loss_b + self.const_loss_s) + self.gen_s_loss + self.gen_b_loss 56 | 57 | # Cross Entropy loss for discriminators for shoes and bags 58 | # Shoes 59 | self.disc_s_real_loss = tf.reduce_mean( 60 | tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_s_real, labels=tf.ones_like(self.disc_s_real))) 61 | self.disc_s_fake_loss = tf.reduce_mean( 62 | tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_s_fake, labels=tf.zeros_like(self.disc_s_fake))) 63 | self.disc_s_loss = self.disc_s_real_loss + self.disc_s_fake_loss # Combined 64 | 65 | 66 | # Bags 67 | self.disc_b_real_loss = tf.reduce_mean( 68 | tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_b_real, labels=tf.ones_like(self.disc_b_real))) 69 | self.disc_b_fake_loss = tf.reduce_mean( 70 | tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_b_fake, labels=tf.zeros_like(self.disc_b_fake))) 71 | self.disc_b_loss = self.disc_b_real_loss + self.disc_b_fake_loss 72 | 73 | # Total Discriminator 
Loss 74 | self.disc_loss = self.disc_b_loss + self.disc_s_loss 75 | 76 | def get_trainable_params(self): 77 | ''' 78 | This function is useful for obtaining trainable parameters which need to be trained either with discriminator or generator loss 79 | :return: 80 | ''' 81 | self.disc_params = [] 82 | self.gen_params = [] 83 | for var in tf.trainable_variables(): 84 | if 'generator' in var.name: 85 | self.gen_params.append(var) 86 | elif 'discriminator' in var.name: 87 | self.disc_params.append(var) 88 | 89 | def define_optimizer(self): 90 | self.disc_optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(self.disc_loss, var_list=self.disc_params) 91 | self.gen_optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(self.gen_loss, var_list=self.gen_params) 92 | 93 | def summary_(self): 94 | # Store the losses 95 | tf.summary.scalar("gen_loss", self.gen_loss) 96 | tf.summary.scalar("gen_s_loss", self.gen_s_loss) 97 | tf.summary.scalar("gen_b_loss", self.gen_b_loss) 98 | tf.summary.scalar("const_loss_s", self.const_loss_s) 99 | tf.summary.scalar("const_loss_b", self.const_loss_b) 100 | tf.summary.scalar("disc_loss", self.disc_loss) 101 | tf.summary.scalar("disc_b_loss", self.disc_b_loss) 102 | tf.summary.scalar("disc_s_loss", self.disc_s_loss) 103 | 104 | # Histograms for all vars 105 | for var in tf.trainable_variables(): 106 | tf.summary.histogram(var.name, var) 107 | 108 | self.summary_ = tf.summary.merge_all() 109 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /Chapter09/README.md: -------------------------------------------------------------------------------- 1 | # DiscoGANs 2 | This repo consists of an implementation of DiscoGANs as demonstrated by this paper [DiscoGANs](https://arxiv.org/pdf/1703.05192.pdf) on Handbags and Shoes dataset. 3 | 4 | ### Installations 5 | * This code is checked on using native Python 3 with anaconda 6 | * Create a conda virtual environment and install relevant packages using requirements.txt file 7 | 8 | ``` 9 | pip install requirements.txt 10 | ``` 11 | ### Python Code Run Instructions 12 | To run the code just execute 13 | ``` 14 | python main.py 15 | ``` 16 | On CPU the code might take few hours to run. However, if you use GPUs it should be much faster 17 | #### Dataset 18 | The dataset used for this project is 19 | * [Handbags Dataset](https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/edges2handbags.tar.gz) 20 | * [Shoes data](https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/edges2shoes.tar.gz) 21 | 22 | ### Code Details 23 | Code is pretty self explanatory. 
There are mainly four files in implementation: 24 | 25 | * main.py: To train and load the model 26 | * utils.py: Contains the helper function to download, preprocess the dataset and also to define Generators and Discriminator networks 27 | * parameters.py: Contains the parameter for the model 28 | * DiscoGAN.py -- Contains the DiscoGAN class defining the model 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /Chapter09/main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import os 3 | from utils import * 4 | from DiscoGAN import DiscoGAN 5 | import random 6 | 7 | 8 | 9 | def train(model): 10 | # Load the data first 11 | # Define a function to load the next batch 12 | # start training 13 | 14 | # Define a function to get the data for the next batch 15 | def get_next_batch(BATCH_SIZE, type ="shoes"): 16 | if type == "shoes": 17 | next_batch_indices = random.sample(range(0, X_shoes.shape[0]), BATCH_SIZE) 18 | batch_data = X_shoes[next_batch_indices,:,:,:] 19 | elif type == "bags": 20 | next_batch_indices = random.sample(range(0, X_bags.shape[0]), BATCH_SIZE) 21 | batch_data = X_bags[next_batch_indices, :, :, :] 22 | return batch_data 23 | 24 | # Loading the dataset 25 | print ("Loading Dataset") 26 | X_shoes, X_bags = load_data(load_type='train') 27 | 28 | with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess: 29 | if RESTORE_TRAINING: 30 | saver = tf.train.Saver() 31 | ckpt = tf.train.get_checkpoint_state("./model") 32 | saver.restore(sess, ckpt.model_checkpoint_path) 33 | print('Model Loaded') 34 | start_epoch = int(str(ckpt.model_checkpoint_path).split('-')[-1].split(".")[0]) 35 | print ("Start EPOCH", start_epoch) 36 | else: 37 | saver = tf.train.Saver(tf.global_variables()) 38 | tf.global_variables_initializer().run() 39 | if not os.path.exists("logs"): 40 | os.makedirs("logs") 41 | start_epoch = 0 42 | 43 | # Starting training from here 44 | train_writer = tf.summary.FileWriter(os.getcwd() + '/logs', graph=sess.graph) 45 | print ("Starting Training") 46 | for global_step in range(start_epoch,EPOCHS): 47 | shoe_batch = get_next_batch(BATCH_SIZE,"shoes") 48 | bag_batch = get_next_batch(BATCH_SIZE,"bags") 49 | feed_dict_batch = {model.X_bags: bag_batch, model.X_shoes: shoe_batch} 50 | op_list = [model.disc_optimizer, model.gen_optimizer, model.disc_loss, model.gen_loss, model.summary_] 51 | _, _, disc_loss, gen_loss, summary_ = sess.run(op_list, feed_dict=feed_dict_batch) 52 | shoe_batch = get_next_batch(BATCH_SIZE, "shoes") 53 | bag_batch = get_next_batch(BATCH_SIZE, "bags") 54 | feed_dict_batch = {model.X_bags: bag_batch, model.X_shoes: shoe_batch} 55 | _, gen_loss = sess.run([model.gen_optimizer, model.gen_loss], feed_dict=feed_dict_batch) 56 | if global_step%10 ==0: 57 | train_writer.add_summary(summary_,global_step) 58 | 59 | if global_step%100 == 0: 60 | print("EPOCH:" + str(global_step) + "\tGenerator Loss: " + str(gen_loss) + "\tDiscriminator Loss: " + str(disc_loss)) 61 | 62 | 63 | if global_step % 1000 == 0: 64 | 65 | shoe_sample = get_next_batch(1, "shoes") 66 | bag_sample = get_next_batch(1, "bags") 67 | 68 | ops = [model.gen_s_fake, model.gen_b_fake, model.gen_recon_s, model.gen_recon_b] 69 | gen_s_fake, gen_b_fake, gen_recon_s, gen_recon_b = sess.run(ops, feed_dict={model.X_shoes: shoe_sample, model.X_bags: bag_sample}) 70 | 71 | save_image(global_step, gen_s_fake, str("gen_s_fake_") + str(global_step)) 72 | 
save_image(global_step,gen_b_fake, str("gen_b_fake_") + str(global_step)) 73 | save_image(global_step, gen_recon_s, str("gen_recon_s_") + str(global_step)) 74 | save_image(global_step, gen_recon_b, str("gen_recon_b_") + str(global_step)) 75 | 76 | if global_step % 1000 == 0: 77 | if not os.path.exists("./model"): 78 | os.makedirs("./model") 79 | saver.save(sess, "./model" + '/model-' + str(global_step) + '.ckpt') 80 | print("Saved Model") 81 | 82 | def main(): 83 | # Get the dataset first. 84 | 85 | if not os.path.exists(os.path.join(os.getcwd(), "bags")): 86 | print("Generating Dataset") 87 | generate_dataset() 88 | # Create the model 89 | print ("Defining the model") 90 | model = DiscoGAN() 91 | print ("Training") 92 | train(model) 93 | 94 | 95 | if __name__ == "__main__": 96 | main() 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | -------------------------------------------------------------------------------- /Chapter09/parameters.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | # This file defines all the parameters that are useful 6 | 7 | BATCH_SIZE = 256 8 | LEARNING_RATE = 0.0002 9 | EPOCHS = 100000 10 | RESTORE_TRAINING= False 11 | 12 | -------------------------------------------------------------------------------- /Chapter09/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.3.0 2 | astor==0.7.1 3 | backports-abc==0.5 4 | backports.functools-lru-cache==1.5 5 | backports.weakref==1.0.post1 6 | bleach==1.5.0 7 | certifi==2018.4.16 8 | cycler==0.10.0 9 | enum34==1.1.6 10 | funcsigs==1.0.2 11 | functools32==3.2.3.post2 12 | futures==3.2.0 13 | gast==0.2.0 14 | graphviz==0.8 15 | grpcio==1.12.1 16 | html5lib==0.9999999 17 | kiwisolver==1.0.1 18 | Markdown==2.6.11 19 | matplotlib==2.2.2 20 | mkl-fft==1.0.4 21 | mkl-random==1.0.1 22 | mock==2.0.0 23 | mxnet==0.11.0rc1 24 | numpy==1.14.5 25 | olefile==0.45.1 26 | pandas==0.23.2 27 | pbr==4.2.0 28 | Pillow==5.2.0 29 | protobuf==3.5.2 30 | pyparsing==2.2.0 31 | pyro-ppl==0.2.0+9c3d68e 32 | python-dateutil==2.7.3 33 | pytz==2018.5 34 | singledispatch==3.4.0.3 35 | six==1.11.0 36 | subprocess32==3.5.2 37 | tensorboard==1.9.0 38 | tensorflow==1.9.0 39 | termcolor==1.1.0 40 | tornado==5.1 41 | tqdm==4.23.4 42 | Werkzeug==0.14.1 43 | wget==3.2 44 | -------------------------------------------------------------------------------- /Chapter10/CapsNet.py: -------------------------------------------------------------------------------- 1 | from parameter_config import * 2 | from helper_functions import routing, squash 3 | 4 | 5 | 6 | class CapsNet: 7 | def __init__(self): 8 | with tf.variable_scope('Input'): 9 | self.X = tf.placeholder(shape=[None, IMG_WIDTH, IMG_HEIGHT, N_CHANNELS], dtype=tf.float32, name="X") 10 | self.Y = tf.placeholder(shape=[None, N_CLASSES], dtype=tf.float32, name="Y") 11 | self.mask_with_labels = tf.placeholder_with_default(False, shape=(), name="mask_with_labels") 12 | 13 | self.define_network() 14 | self.define_loss() 15 | self.define_accuracy() 16 | self.define_optimizer() 17 | self.summary_() 18 | 19 | def define_network(self): 20 | with tf.variable_scope('Conv1_layer'): 21 | conv1_layer = tf.layers.conv2d(self.X, name="conv1_layer", **CONV1_LAYER_PARAMS) # [batch_size, 20, 20, 256] 22 | 23 | with tf.variable_scope('PrimaryCaps_layer'): 24 | conv2_layer = tf.layers.conv2d(conv1_layer, name="conv2_layer", **CONV2_LAYER_PARAMS) # [batch_size, 6, 6, 256] 25 | 26 | primary_caps = tf.reshape(conv2_layer, 
(BATCH_SIZE, NCAPS_CAPS1, CAPS_DIM_CAPS1, 1), name="primary_caps") # [batch_size, 1152, 8, 1] 27 | primary_caps_output = squash(primary_caps, name="caps1_output") 28 | # [batch_size, 1152, 8, 1] 29 | 30 | # DigitCaps layer, return [batch_size, 10, 16, 1] 31 | with tf.variable_scope('DigitCaps_layer'): 32 | digitcaps_input = tf.reshape(primary_caps_output, shape=(BATCH_SIZE, NCAPS_CAPS1, 1, CAPS_DIM_CAPS1, 1)) # [batch_size, 1152, 1, 8, 1] 33 | # [batch_size, 1152, 10, 1, 1] 34 | self.digitcaps_output = routing(digitcaps_input) # [batch_size, 10, 16, 1] 35 | 36 | # Decoder 37 | with tf.variable_scope('Masking'): 38 | self.v_norm = tf.sqrt(tf.reduce_sum(tf.square(self.digitcaps_output), axis=2, keep_dims=True) + tf.keras.backend.epsilon()) 39 | 40 | predicted_class = tf.to_int32(tf.argmax(self.v_norm, axis=1)) #[batch_size, 10,1,1] 41 | self.y_predicted = tf.reshape(predicted_class, shape=(BATCH_SIZE,)) #[batch_size] 42 | y_predicted_one_hot = tf.one_hot(self.y_predicted, depth=NCAPS_CAPS2) #[batch_size,10] One hot operation 43 | 44 | reconstruction_targets = tf.cond(self.mask_with_labels, # condition 45 | lambda: self.Y, # if True (Training) 46 | lambda: y_predicted_one_hot, # if False (Test) 47 | name="reconstruction_targets") 48 | 49 | digitcaps_output_masked = tf.multiply(tf.squeeze(self.digitcaps_output), tf.expand_dims(reconstruction_targets, -1)) # [batch_size, 10, 16] 50 | 51 | 52 | #Flattening as suggested by the paper 53 | decoder_input = tf.reshape(digitcaps_output_masked, [BATCH_SIZE, -1]) # [batch_size, 160] 54 | 55 | 56 | with tf.variable_scope('Decoder'): 57 | fc1 = tf.layers.dense(decoder_input, layer1_size, activation=tf.nn.relu, name="FC1") # [batch_size, 512] 58 | fc2 = tf.layers.dense(fc1, layer2_size, activation=tf.nn.relu, name="FC2") # [batch_size, 1024] 59 | self.decoder_output = tf.layers.dense(fc2, output_size, activation=tf.nn.sigmoid, name="FC3") # [batch_size, 784] 60 | 61 | 62 | def define_loss(self): 63 | # Margin Loss 64 | with tf.variable_scope('Margin_Loss'): 65 | # max(0, m_plus-||v_c||)^2 66 | positive_error = tf.square(tf.maximum(0., 0.9 - self.v_norm)) # [batch_size, 10, 1, 1] 67 | # max(0, ||v_c||-m_minus)^2 68 | negative_error = tf.square(tf.maximum(0., self.v_norm - 0.1)) # [batch_size, 10, 1, 1] 69 | # reshape: [batch_size, 10, 1, 1] => [batch_size, 10] 70 | positive_error = tf.reshape(positive_error, shape=(BATCH_SIZE, -1)) 71 | negative_error = tf.reshape(negative_error, shape=(BATCH_SIZE, -1)) 72 | 73 | Loss_vec = self.Y * positive_error + 0.5 * (1- self.Y) * negative_error # [batch_size, 10] 74 | self.margin_loss = tf.reduce_mean(tf.reduce_sum(Loss_vec, axis=1), name="margin_loss") 75 | 76 | # Reconstruction Loss 77 | with tf.variable_scope('Reconstruction_Loss'): 78 | ground_truth = tf.reshape(self.X, shape=(BATCH_SIZE, -1)) 79 | self.reconstruction_loss = tf.reduce_mean(tf.square(self.decoder_output - ground_truth)) 80 | 81 | # Combined Loss 82 | with tf.variable_scope('Combined_Loss'): 83 | self.combined_loss = self.margin_loss + 0.0005 * self.reconstruction_loss 84 | 85 | def define_accuracy(self): 86 | with tf.variable_scope('Accuracy'): 87 | correct_predictions = tf.equal(tf.to_int32(tf.argmax(self.Y, axis=1)), self.y_predicted) 88 | self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32)) 89 | 90 | def define_optimizer(self): 91 | with tf.variable_scope('Optimizer'): 92 | optimizer = tf.train.AdamOptimizer() 93 | self.train_optimizer = optimizer.minimize(self.combined_loss, name="training_optimizer") 94 | 95 | def 
summary_(self): 96 | reconstructed_image = tf.reshape(self.decoder_output, shape=(BATCH_SIZE, IMG_WIDTH, IMG_HEIGHT, N_CHANNELS)) 97 | summary_list = [tf.summary.scalar('Loss/margin_loss', self.margin_loss), 98 | tf.summary.scalar('Loss/reconstruction_loss', self.reconstruction_loss), 99 | tf.summary.image('original', self.X), 100 | tf.summary.image('reconstructed', reconstructed_image)] 101 | self.summary_ = tf.summary.merge(summary_list) 102 | -------------------------------------------------------------------------------- /Chapter10/README.md: -------------------------------------------------------------------------------- 1 | # Capsule Networks 2 | This repo consists of an implementation of Capsule Networks as demonstrated by this paper [Dynamic Routing Between Capsules](https://arxiv.org/abs/1710.09829) on Fashion MNIST dataset. 3 | ### Installations 4 | * This code is checked on using native Python 3 with anaconda 5 | * Create a conda virtual environment and install relevant packages using requirements.txt file 6 | ``` 7 | pip install requirements.txt 8 | ``` 9 | ### Python Code Run Instructions 10 | To run the code just execute 11 | ``` 12 | python main.py 13 | ``` 14 | On CPU the code might take few hours to run. However, if you use GPUs it should be much faster 15 | #### Dataset 16 | The dataset used for this illustration is [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) 17 | 18 | ### Code Details 19 | Code is pretty self explanatory. There are mainly four files in implementation: 20 | * main.py -- It contains three parts: 21 | * Train: To train the model 22 | * Test: Test the model on testing dataset 23 | * Visualize: Visualize few reconstructed images for further interpretation 24 | * parameter_config.py -- Contains all the parameter declarations 25 | * CapsNet.py -- Capsule network implementation. 26 | * helper_functions.py -- Helper functions. Also contains the **squashing** and **routing** functions needed by capsule networks 27 | 28 | Note that the model was not tuned for best hyperparameters. Feel free to play around. 
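For reference, the squashing non-linearity mentioned in the list above can be sketched as follows; the axis and epsilon values here are assumptions, and the repository's own version lives in helper_functions.py.

```
# Illustrative sketch of the capsule squashing function; see helper_functions.py
# for the version actually used by this chapter's code.
import tensorflow as tf

def squash(s, axis=-2, epsilon=1e-7):
    """Scales capsule vectors so their norm falls in [0, 1) without changing direction."""
    squared_norm = tf.reduce_sum(tf.square(s), axis=axis, keep_dims=True)
    scale = squared_norm / (1.0 + squared_norm)
    return scale * s / tf.sqrt(squared_norm + epsilon)
```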
29 | -------------------------------------------------------------------------------- /Chapter10/data/fashion-mnist/t10k-images-idx3-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter10/data/fashion-mnist/t10k-images-idx3-ubyte -------------------------------------------------------------------------------- /Chapter10/data/fashion-mnist/train-images-idx3-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter10/data/fashion-mnist/train-images-idx3-ubyte -------------------------------------------------------------------------------- /Chapter10/data/fashion-mnist/train-labels-idx1-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter10/data/fashion-mnist/train-labels-idx1-ubyte -------------------------------------------------------------------------------- /Chapter10/imgs/img1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter10/imgs/img1.png -------------------------------------------------------------------------------- /Chapter10/imgs/img2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter10/imgs/img2.png -------------------------------------------------------------------------------- /Chapter10/imgs/img3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter10/imgs/img3.png -------------------------------------------------------------------------------- /Chapter10/imgs/img4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter10/imgs/img4.png -------------------------------------------------------------------------------- /Chapter10/main.py: -------------------------------------------------------------------------------- 1 | from CapsNet import CapsNet 2 | from helper_functions import * 3 | import os 4 | 5 | 6 | def train(model): 7 | global fd_train 8 | x_train, y_train, x_valid, y_valid = load_data(load_type='train') 9 | print('Data set Loaded') 10 | num_batches = int(y_train.shape[0] / BATCH_SIZE) 11 | if not os.path.exists(CHECKPOINT_PATH_DIR): 12 | os.makedirs(CHECKPOINT_PATH_DIR) 13 | 14 | with tf.Session() as sess: 15 | if RESTORE_TRAINING: 16 | saver = tf.train.Saver() 17 | ckpt = tf.train.get_checkpoint_state(CHECKPOINT_PATH_DIR) 18 | saver.restore(sess, ckpt.model_checkpoint_path) 19 | print('Model Loaded') 20 | start_epoch = int(str(ckpt.model_checkpoint_path).split('-')[-1]) 21 | train_file, val_file, best_loss_val = load_existing_details() 22 | else: 23 | saver = tf.train.Saver(tf.global_variables()) 24 | 
tf.global_variables_initializer().run() 25 | print('All variables initialized') 26 | train_file, val_file = write_progress('train') 27 | start_epoch = 0 28 | best_loss_val = np.infty 29 | print('Training Starts') 30 | acc_batch_all = loss_batch_all = np.array([]) 31 | train_writer = tf.summary.FileWriter(LOG_DIR, sess.graph) 32 | for epoch in range(start_epoch, EPOCHS): 33 | # Shuffle the input data 34 | x_train, y_train = shuffle_data(x_train, y_train) 35 | for step in range(num_batches): 36 | start = step * BATCH_SIZE 37 | end = (step + 1) * BATCH_SIZE 38 | global_step = epoch * num_batches + step 39 | x_batch, y_batch = x_train[start:end], y_train[start:end] 40 | feed_dict_batch = {model.X: x_batch, model.Y: y_batch, model.mask_with_labels: True} 41 | if not (step % 100): 42 | _, acc_batch, loss_batch, summary_ = sess.run([model.train_optimizer, model.accuracy, 43 | model.combined_loss, model.summary_], 44 | feed_dict=feed_dict_batch) 45 | train_writer.add_summary(summary_, global_step) 46 | acc_batch_all = np.append(acc_batch_all, acc_batch) 47 | loss_batch_all = np.append(loss_batch_all, loss_batch) 48 | mean_acc,mean_loss = np.mean(acc_batch_all),np.mean(loss_batch_all) 49 | summary_ = tf.Summary(value=[tf.Summary.Value(tag='Accuracy', simple_value=mean_acc)]) 50 | train_writer.add_summary(summary_, global_step) 51 | summary_ = tf.Summary(value=[tf.Summary.Value(tag='Loss/combined_loss', simple_value=mean_loss)]) 52 | train_writer.add_summary(summary_, global_step) 53 | 54 | train_file.write(str(global_step) + ',' + str(mean_acc) + ',' + str(mean_loss) + "\n") 55 | train_file.flush() 56 | print(" Batch #{0}, Epoch: #{1}, Mean Training loss: {2:.4f}, Mean Training accuracy: {3:.01%}".format( 57 | step, (epoch+1), mean_loss, mean_acc)) 58 | acc_batch_all = loss_batch_all = np.array([]) 59 | else: 60 | _, acc_batch, loss_batch = sess.run([model.train_optimizer, model.accuracy, model.combined_loss], 61 | feed_dict=feed_dict_batch) 62 | acc_batch_all = np.append(acc_batch_all, acc_batch) 63 | loss_batch_all = np.append(loss_batch_all, loss_batch) 64 | 65 | # Validation metrics after each EPOCH 66 | acc_val, loss_val = eval_performance(sess, model, x_valid, y_valid) 67 | val_file.write(str(epoch + 1) + ',' + str(acc_val) + ',' + str(loss_val) + '\n') 68 | val_file.flush() 69 | print("\rEpoch: {} Mean Train Accuracy: {:.4f}% ,Mean Val accuracy: {:.4f}% Loss: {:.6f}{}".format( 70 | epoch + 1, mean_acc * 100, acc_val * 100, loss_val, 71 | " (improved)" if loss_val < best_loss_val else "")) 72 | 73 | # Saving the improved model 74 | if loss_val < best_loss_val: 75 | saver.save(sess, CHECKPOINT_PATH_DIR + '/model.tfmodel', global_step=epoch + 1) 76 | best_loss_val = loss_val 77 | train_file.close() 78 | val_file.close() 79 | 80 | 81 | def test(model): 82 | x_test, y_test = load_data(load_type='test') 83 | print('Loaded the test dataset') 84 | test_file = write_progress('test') 85 | saver = tf.train.Saver() 86 | ckpt = tf.train.get_checkpoint_state(CHECKPOINT_PATH_DIR) 87 | with tf.Session() as sess: 88 | saver.restore(sess, ckpt.model_checkpoint_path) 89 | print('Model Loaded') 90 | acc_test, loss_test = eval_performance(sess, model, x_test, y_test) 91 | test_file.write(str(acc_test) + ',' + str(loss_test) + '\n') 92 | test_file.flush() 93 | print('-----------------------------------------------------------------------------') 94 | print("Test Set Loss: {0:.4f}, Test Set Accuracy: {1:.01%}".format(loss_test, acc_test)) 95 | 96 | 97 | def reconstruct_sample(model, n_samples=5): 98 | x_test, 
y_test = load_data(load_type='test') 99 | sample_images, sample_labels = x_test[:BATCH_SIZE], y_test[:BATCH_SIZE] 100 | saver = tf.train.Saver() 101 | ckpt = tf.train.get_checkpoint_state(CHECKPOINT_PATH_DIR) 102 | with tf.Session() as sess: 103 | saver.restore(sess, ckpt.model_checkpoint_path) 104 | feed_dict_samples = {model.X: sample_images, model.Y: sample_labels} 105 | decoder_out, y_predicted = sess.run([model.decoder_output, model.y_predicted], 106 | feed_dict=feed_dict_samples) 107 | reconstruction(sample_images, sample_labels, decoder_out, y_predicted, n_samples) 108 | 109 | 110 | def main(_): 111 | # Train the model and evaluate on test set 112 | model = CapsNet() 113 | print ("Step1: Train") 114 | train(model) 115 | print("Step2: Testing the performance of model on the Test Set") 116 | test(model) 117 | print ("Step3: Reconstructing some sample images") 118 | reconstruct_sample(model,n_samples =3) 119 | 120 | if __name__ == "__main__": 121 | tf.app.run() 122 | -------------------------------------------------------------------------------- /Chapter10/parameter_config.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import tensorflow as tf 3 | import numpy as np 4 | 5 | parser = argparse.ArgumentParser() 6 | 7 | ##### Arguments #### 8 | 9 | ## Fashion MNIST Parameters 10 | N_CLASSES = 10 11 | IMG_WIDTH = 28 12 | IMG_HEIGHT = 28 13 | N_CHANNELS = 1 # Number of Input Channels 14 | IMAGE_LABELS = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 15 | 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] 16 | 17 | 18 | 19 | ## Model Parameters 20 | CONV1_LAYER_PARAMS = {"filters": 256, 21 | "kernel_size": 9, 22 | "activation": tf.nn.relu, 23 | "padding": "valid", 24 | "strides": 1 25 | } 26 | 27 | # Parameters of PrimaryCaps_layer 28 | MAPS_CAPS1 = 32 29 | NCAPS_CAPS1 = MAPS_CAPS1*6*6 # Total number of primary capsules = 1152 30 | CAPS_DIM_CAPS1 = 8 # Dimensions of each capsule 31 | 32 | CONV2_LAYER_PARAMS = {"filters": MAPS_CAPS1 * CAPS_DIM_CAPS1, # Total Convolutional Filters = 256 33 | "kernel_size": 9, 34 | "strides": 2, 35 | "padding": "valid", 36 | "activation": tf.nn.relu} 37 | 38 | # Parameters of DigitCaps_layer 39 | NCAPS_CAPS2 = 10 40 | CAPS_DIM_CAPS2 = 16 # Dimension of each capsule in layer 2 41 | 42 | # Decoder Parameters 43 | layer1_size = 512 44 | layer2_size = 1024 45 | output_size = IMG_WIDTH* IMG_HEIGHT 46 | 47 | ## Loss 48 | 49 | # Margin Loss 50 | M_PLUS = 0.9 51 | M_MINUS= 0.1 52 | LAMBDA = 0.5 53 | 54 | # Reconstruction Loss 55 | ALPHA = 0.0005 56 | 57 | # Training Params 58 | BATCH_SIZE = 128 59 | EPOCHS = 20 60 | ROUTING_ITERATIONS = 3 # Routing Iterations 61 | STDEV = 0.01 # STDEV for Weight Initialization 62 | 63 | 64 | ## Environment and Save Directories 65 | RESTORE_TRAINING = False # Restores the trained model 66 | CHECKPOINT_PATH_DIR = './model_dir' 67 | LOG_DIR = './logs/' 68 | RESULTS_DIR = './results/' 69 | STEPS_TO_SAVE = 100 # Frequency (in steps) of saving the train result 70 | 71 | ## Visualization Parameters 72 | N_SAMPLES = 3 # No. 
of Samples Images to Save 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /Chapter10/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.2.2 2 | astor==0.7.1 3 | bleach==1.5.0 4 | certifi==2018.4.16 5 | cycler==0.10.0 6 | gast==0.2.0 7 | grpcio==1.13.0 8 | html5lib==0.9999999 9 | kiwisolver==1.0.1 10 | Markdown==2.6.11 11 | matplotlib==2.2.2 12 | numpy==1.14.5 13 | pandas==0.23.2 14 | protobuf==3.6.0 15 | pyparsing==2.2.0 16 | pyro-ppl==0.2.0+9c3d68e 17 | python-dateutil==2.7.3 18 | pytz==2018.5 19 | six==1.11.0 20 | tensorboard==1.8.0 21 | tensorflow==1.8.0 22 | termcolor==1.1.0 23 | tqdm==4.23.4 24 | Werkzeug==0.14.1 25 | -------------------------------------------------------------------------------- /Chapter10/results/decoder_images.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter10/results/decoder_images.png -------------------------------------------------------------------------------- /Chapter10/results/input_images.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter10/results/input_images.png -------------------------------------------------------------------------------- /Chapter10/results/test.csv: -------------------------------------------------------------------------------- 1 | accuracy,loss 2 | 0.9052483974358975,0.07073675057826898 3 | -------------------------------------------------------------------------------- /Chapter10/results/train.csv: -------------------------------------------------------------------------------- 1 | step,accuracy,loss 2 | 0,0.09375,0.8095152378082275 3 | 100,0.5715625,0.31245121836662293 4 | 200,0.791328125,0.1502521462738514 5 | 300,0.8415625,0.11915692172944546 6 | 400,0.84421875,0.11319767959415912 7 | 429,0.8504849137931034,0.1083738212955409 8 | 529,0.860234375,0.10172661993652582 9 | 629,0.868828125,0.09590242583304644 10 | 729,0.87859375,0.09020579047501087 11 | 829,0.881328125,0.08867673952132464 12 | 858,0.8736530172413793,0.09718997262675187 13 | 958,0.8878125,0.08388861279934645 14 | 1058,0.88640625,0.08253685686737298 15 | 1158,0.893359375,0.07973779056221247 16 | 1258,0.891171875,0.07954067450016737 17 | 1287,0.8997844827586207,0.07836812965828797 18 | 1387,0.901484375,0.07362835209816694 19 | 1487,0.90171875,0.073233842253685 20 | 1587,0.9,0.07385420694947242 21 | 1687,0.902109375,0.07208837758749724 22 | 1716,0.8984375,0.07328963138420007 23 | 1816,0.91078125,0.06652853142470122 24 | 1916,0.9065625,0.06734239399433135 25 | 2016,0.912578125,0.06600361950695514 26 | 2116,0.912890625,0.06641654968261719 27 | 2145,0.9113685344827587,0.06858355320733169 28 | 2245,0.918984375,0.06205341562628746 29 | 2345,0.91875,0.06159479677677154 30 | 2445,0.921953125,0.059774469286203384 31 | 2545,0.920078125,0.05970324773341417 32 | 2574,0.9172952586206896,0.06360147764970517 33 | 2674,0.92828125,0.05438732739537954 34 | 2774,0.924296875,0.057069247346371414 35 | 2874,0.92625,0.05559893842786551 36 | 2974,0.925390625,0.05615185607224703 37 | 3003,0.9216056034482759,0.05873074857839223 38 | 3103,0.928828125,0.052739509530365464 39 | 3203,0.936875,0.05021806042641401 40 | 
3303,0.931171875,0.05246757220476866 41 | 3403,0.9328125,0.05212257364764809 42 | 3432,0.9321120689655172,0.05062765621676527 43 | 3532,0.943046875,0.04521637219935656 44 | 3632,0.941484375,0.046342298053205014 45 | 3732,0.93953125,0.04727683622390032 46 | 3832,0.93625,0.050311169661581515 47 | 3861,0.9420797413793104,0.04484546724064597 48 | 3961,0.94859375,0.041767358966171744 49 | 4061,0.946640625,0.04286794146522879 50 | 4161,0.943359375,0.04435352893546224 51 | 4261,0.946328125,0.04342479040846228 52 | 4290,0.943426724137931,0.046497174506557395 53 | 4390,0.955390625,0.036580598633736375 54 | 4490,0.954453125,0.03863168263807893 55 | 4590,0.948125,0.041220346465706825 56 | 4690,0.95,0.040284305810928345 57 | 4719,0.9488146551724138,0.04061482782507765 58 | 4819,0.9578125,0.035484369713813066 59 | 4919,0.95703125,0.03529309280216694 60 | 5019,0.956328125,0.036770439138635994 61 | 5119,0.95359375,0.037882247362285855 62 | 5148,0.953125,0.03647255011159798 63 | 5248,0.9628125,0.031473791114985945 64 | 5348,0.96421875,0.03189338462427258 65 | 5448,0.9603125,0.03374789277091622 66 | 5548,0.9575,0.035432015694677826 67 | 5577,0.9574353448275862,0.03472962391016812 68 | 5677,0.964453125,0.029090104391798377 69 | 5777,0.966953125,0.02917375748977065 70 | 5877,0.963515625,0.03223260316997766 71 | 5977,0.964453125,0.03198644069954753 72 | 6006,0.9620150862068966,0.03241825675399139 73 | 6106,0.97171875,0.026493920162320137 74 | 6206,0.97,0.026837863493710756 75 | 6306,0.96671875,0.027890612687915563 76 | 6406,0.96765625,0.02863279345445335 77 | 6435,0.9636314655172413,0.031100800614161737 78 | 6535,0.975546875,0.02251940202899277 79 | 6635,0.972421875,0.024686241755262017 80 | 6735,0.971875,0.02542632515542209 81 | 6835,0.96671875,0.02810399920679629 82 | 6864,0.9676724137931034,0.02780111850207222 83 | 6964,0.979765625,0.02030867464840412 84 | 7064,0.97625,0.021939718220382928 85 | 7164,0.972109375,0.02516912104561925 86 | 7264,0.97203125,0.02568971554748714 87 | 7293,0.9676724137931034,0.026753147518069578 88 | 7393,0.97859375,0.02016483658924699 89 | 7493,0.97765625,0.020626045214012265 90 | 7593,0.977109375,0.02157713245600462 91 | 7693,0.97578125,0.021756027741357684 92 | 7722,0.9738685344827587,0.024293266429469502 93 | 7822,0.981953125,0.017713550990447403 94 | 7922,0.981015625,0.019090190320275723 95 | 8022,0.98,0.01958840411156416 96 | 8122,0.978125,0.021132299336604776 97 | 8151,0.9719827586206896,0.02502705073305245 98 | 8251,0.981171875,0.017996025113388894 99 | 8351,0.98234375,0.017647953145205973 100 | 8451,0.9825,0.01793428724166006 101 | 8551,0.980546875,0.019694409035146236 102 | -------------------------------------------------------------------------------- /Chapter10/results/validation.csv: -------------------------------------------------------------------------------- 1 | epoch,accuracy,loss 2 | 1,0.8547676282051282,0.10413328806559245 3 | 2,0.8778044871794872,0.09009873332121433 4 | 3,0.8844150641025641,0.08420521918779765 5 | 4,0.8936298076923077,0.07686319670233971 6 | 5,0.8984375,0.07346154472384697 7 | 6,0.9008413461538461,0.07146473582356404 8 | 7,0.9060496794871795,0.07092234234397228 9 | 8,0.9038461538461539,0.07032975210593297 10 | 9,0.913261217948718,0.06738955021286622 11 | 10,0.90625,0.0685105198659958 12 | 11,0.9116586538461539,0.067448751666607 13 | 12,0.9072516025641025,0.06859098890653023 14 | 13,0.9096554487179487,0.06998685479928286 15 | 14,0.9112580128205128,0.067459581945187 16 | 15,0.9080528846153846,0.07073222329983345 17 | 
16,0.9116586538461539,0.07026838788237327 18 | 17,0.9068509615384616,0.07060776440761028 19 | 18,0.9088541666666666,0.07340454673155761 20 | 19,0.9072516025641025,0.07282687675876495 21 | 20,0.9120592948717948,0.07225485356190266 22 | -------------------------------------------------------------------------------- /Chapter11/model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter11/model.png -------------------------------------------------------------------------------- /Chapter11/netflix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter11/netflix.png -------------------------------------------------------------------------------- /Chapter12/Images.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/Images.zip -------------------------------------------------------------------------------- /Chapter12/data/buses/Community-Transit-15808-Double-Tall-in-Downtown-Seattle.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/Community-Transit-15808-Double-Tall-in-Downtown-Seattle.JPG -------------------------------------------------------------------------------- /Chapter12/data/buses/Torrens-Transit-Australian-Bus-Manufacturing--CB62A--bodied-.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/Torrens-Transit-Australian-Bus-Manufacturing--CB62A--bodied-.jpg -------------------------------------------------------------------------------- /Chapter12/data/buses/bus-2554946-960-720.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/bus-2554946-960-720.jpg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-12.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-12.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-13.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-13.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-14.jpeg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-14.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-15.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-15.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-16.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-16.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-17.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-17.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-18.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-18.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-19.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-19.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-2.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-20.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-20.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-21.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-21.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-22.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-22.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-23.jpeg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-23.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-24.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-24.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-26.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-26.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-28.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-28.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-29.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-29.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-30.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-30.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-31.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-31.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-32.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-32.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-4.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-4.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/image-5.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-5.jpeg 
-------------------------------------------------------------------------------- /Chapter12/data/buses/image-9.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/image-9.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-10.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-10.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-15.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-15.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-16.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-16.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-18.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-18.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-19.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-19.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-2.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-20.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-20.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-21.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-21.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-23.jpeg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-23.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-24.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-24.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-25.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-25.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-26.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-26.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-27.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-27.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-28.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-28.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-29.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-29.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-30.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-30.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-4.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-4.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-5.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-5.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-7.jpeg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-7.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-8.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-8.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images-9.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images-9.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/images.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/images.jpeg -------------------------------------------------------------------------------- /Chapter12/data/buses/london-2665352-960-720.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/buses/london-2665352-960-720.jpg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-10.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-10.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-11.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-11.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-12.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-12.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-13.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-13.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-14.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-14.jpeg 
-------------------------------------------------------------------------------- /Chapter12/data/cars/image-15.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-15.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-16.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-16.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-17.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-17.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-19.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-19.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-2.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-20.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-20.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-21.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-21.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-22.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-22.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-23.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-23.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-24.jpeg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-24.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-25.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-25.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-27.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-27.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-28.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-28.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-29.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-29.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-3.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-4.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-4.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-5.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-5.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-7.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-7.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-8.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-8.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/image-9.jpeg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/image-9.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-13.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-13.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-14.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-14.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-15.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-15.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-19.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-19.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-2.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-20.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-20.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-21.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-21.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-23.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-23.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-26.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-26.jpeg 
-------------------------------------------------------------------------------- /Chapter12/data/cars/images-27.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-27.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-28.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-28.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-3.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-4.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-4.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-5.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-5.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-7.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-7.jpeg -------------------------------------------------------------------------------- /Chapter12/data/cars/images-8.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter12/data/cars/images-8.jpeg -------------------------------------------------------------------------------- /Chapter12/distributed.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | cluster = tf.train.ClusterSpec({ 4 | "worker": [ 5 | "localhost:2222", 6 | "localhost:2223" 7 | ]}) 8 | # Define Servers 9 | worker0 = tf.train.Server(cluster, job_name="worker", task_index=0) 10 | worker1 = tf.train.Server(cluster, job_name="worker", task_index=1) 11 | 12 | 13 | with tf.device("/job:worker/task:1"): 14 | a = tf.constant(3.0, dtype=tf.float32) 15 | b = tf.constant(4.0) 16 | add_node = tf.add(a,b) 17 | 18 | 19 | with tf.device("/job:worker/task:0"): 20 | mul_node = a * b 21 | 22 | 23 | with tf.Session("grpc://192.168.1.4:2222") as sess: 24 | result = sess.run([add_node, mul_node]) 25 | print(result) 26 | -------------------------------------------------------------------------------- /Chapter12/mnist_TFoS.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import numpy as np 6 | import tensorflow as tf 7 | 8 | tf.logging.set_verbosity(tf.logging.INFO) 9 | 10 | 11 | def cnn_model_fn(features, labels, mode): 12 | """Model function for CNN.""" 13 | # Input Layer 14 | # Reshape X to 4-D tensor: [batch_size, width, height, channels] 15 | # MNIST images are 28x28 pixels, and have one color channel 16 | input_layer = tf.reshape(features["x"], [-1, 28, 28, 1]) 17 | 18 | # Convolutional Layer #1 19 | # Computes 32 features using a 5x5 filter with ReLU activation. 20 | # Padding is added to preserve width and height. 21 | # Input Tensor Shape: [batch_size, 28, 28, 1] 22 | # Output Tensor Shape: [batch_size, 28, 28, 32] 23 | conv1 = tf.layers.conv2d( 24 | inputs=input_layer, 25 | filters=32, 26 | kernel_size=[5, 5], 27 | padding="same", 28 | activation=tf.nn.relu) 29 | 30 | # Pooling Layer #1 31 | # First max pooling layer with a 2x2 filter and stride of 2 32 | # Input Tensor Shape: [batch_size, 28, 28, 32] 33 | # Output Tensor Shape: [batch_size, 14, 14, 32] 34 | pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) 35 | 36 | # Convolutional Layer #2 37 | # Computes 64 features using a 5x5 filter. 38 | # Padding is added to preserve width and height. 39 | # Input Tensor Shape: [batch_size, 14, 14, 32] 40 | # Output Tensor Shape: [batch_size, 14, 14, 64] 41 | conv2 = tf.layers.conv2d( 42 | inputs=pool1, 43 | filters=64, 44 | kernel_size=[5, 5], 45 | padding="same", 46 | activation=tf.nn.relu) 47 | 48 | # Pooling Layer #2 49 | # Second max pooling layer with a 2x2 filter and stride of 2 50 | # Input Tensor Shape: [batch_size, 14, 14, 64] 51 | # Output Tensor Shape: [batch_size, 7, 7, 64] 52 | pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) 53 | 54 | # Flatten tensor into a batch of vectors 55 | # Input Tensor Shape: [batch_size, 7, 7, 64] 56 | # Output Tensor Shape: [batch_size, 7 * 7 * 64] 57 | pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) 58 | 59 | # Dense Layer 60 | # Densely connected layer with 1024 neurons 61 | # Input Tensor Shape: [batch_size, 7 * 7 * 64] 62 | # Output Tensor Shape: [batch_size, 1024] 63 | dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) 64 | 65 | # Add dropout operation; 0.6 probability that element will be kept 66 | dropout = tf.layers.dropout( 67 | inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN) 68 | 69 | # Logits layer 70 | # Input Tensor Shape: [batch_size, 1024] 71 | # Output Tensor Shape: [batch_size, 10] 72 | logits = tf.layers.dense(inputs=dropout, units=10) 73 | 74 | predictions = { 75 | # Generate predictions (for PREDICT and EVAL mode) 76 | "classes": tf.argmax(input=logits, axis=1), 77 | # Add `softmax_tensor` to the graph. It is used for PREDICT and by the 78 | # `logging_hook`. 
79 | "probabilities": tf.nn.softmax(logits, name="softmax_tensor") 80 | } 81 | if mode == tf.estimator.ModeKeys.PREDICT: 82 | return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) 83 | 84 | # Calculate Loss (for both TRAIN and EVAL modes) 85 | loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) 86 | 87 | # Configure the Training Op (for TRAIN mode) 88 | if mode == tf.estimator.ModeKeys.TRAIN: 89 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001) 90 | train_op = optimizer.minimize( 91 | loss=loss, 92 | global_step=tf.train.get_global_step()) 93 | return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) 94 | 95 | # Add evaluation metrics (for EVAL mode) 96 | eval_metric_ops = { 97 | "accuracy": tf.metrics.accuracy( 98 | labels=labels, predictions=predictions["classes"])} 99 | return tf.estimator.EstimatorSpec( 100 | mode=mode, loss=loss, eval_metric_ops=eval_metric_ops) 101 | 102 | 103 | def main(args, ctx): 104 | # Load training and eval data 105 | mnist = tf.contrib.learn.datasets.mnist.read_data_sets(args.data_dir) 106 | train_data = mnist.train.images # Returns np.array 107 | train_labels = np.asarray(mnist.train.labels, dtype=np.int32) 108 | eval_data = mnist.test.images # Returns np.array 109 | eval_labels = np.asarray(mnist.test.labels, dtype=np.int32) 110 | 111 | # Create the Estimator 112 | mnist_classifier = tf.estimator.Estimator( 113 | model_fn=cnn_model_fn, model_dir=args.model) 114 | 115 | # Set up logging for predictions 116 | # Log the values in the "Softmax" tensor with label "probabilities" 117 | tensors_to_log = {"probabilities": "softmax_tensor"} 118 | logging_hook = tf.train.LoggingTensorHook( 119 | tensors=tensors_to_log, every_n_iter=50) 120 | 121 | # Train the model 122 | train_input_fn = tf.estimator.inputs.numpy_input_fn( 123 | x={"x": train_data}, 124 | y=train_labels, 125 | batch_size=args.batch_size, 126 | num_epochs=None, 127 | shuffle=True) 128 | 129 | 130 | # Evaluate the model and print results 131 | eval_input_fn = tf.estimator.inputs.numpy_input_fn( 132 | x={"x": eval_data}, 133 | y=eval_labels, 134 | num_epochs=1, 135 | shuffle=False) 136 | # eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn) 137 | # print(eval_results) 138 | 139 | # Using tf.estimator.train_and_evaluate 140 | train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=args.steps, hooks=[logging_hook]) 141 | eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn) 142 | tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec) 143 | 144 | 145 | if __name__ == "__main__": 146 | # tf.app.run() 147 | 148 | from pyspark.context import SparkContext 149 | from pyspark.conf import SparkConf 150 | from tensorflowonspark import TFCluster 151 | import argparse 152 | 153 | sc = SparkContext(conf=SparkConf().setAppName("mnist_spark")) 154 | executors = sc._conf.get("spark.executor.instances") 155 | num_executors = int(executors) if executors is not None else 1 156 | 157 | parser = argparse.ArgumentParser() 158 | parser.add_argument("--batch_size", help="number of records per batch", type=int, default=100) 159 | parser.add_argument("--cluster_size", help="number of nodes in the cluster", type=int, default=num_executors) 160 | parser.add_argument("--data_dir", help="path to MNIST data", default="MNIST-data") 161 | parser.add_argument("--model", help="path to save model/checkpoint", default="mnist_model") 162 | parser.add_argument("--num_ps", help="number of PS nodes in cluster", type=int, 
default=1) 163 | parser.add_argument("--steps", help="maximum number of steps", type=int, default=1000) 164 | parser.add_argument("--tensorboard", help="launch tensorboard process", action="store_true") 165 | 166 | args = parser.parse_args() 167 | print("args:", args) 168 | 169 | cluster = TFCluster.run(sc, main, args, args.cluster_size, args.num_ps, tensorboard=args.tensorboard, input_mode=TFCluster.InputMode.TENSORFLOW, log_dir=args.model, master_node='master') 170 | cluster.shutdown() -------------------------------------------------------------------------------- /Chapter12/object_detection.py: -------------------------------------------------------------------------------- 1 | # To allow Python to find Spark driver 2 | import findspark 3 | findspark.init('/home/ubuntu/spark-2.4.0-bin-hadoop2.7') 4 | 5 | import os 6 | SUBMIT_ARGS = "--packages databricks:spark-deep-learning:1.3.0-spark2.4-s_2.11 pyspark-shell" 7 | os.environ["PYSPARK_SUBMIT_ARGS"] = SUBMIT_ARGS 8 | 9 | 10 | from pyspark.sql import SparkSession 11 | 12 | spark = SparkSession.builder \ 13 | .appName("ImageClassification") \ 14 | .config("spark.executor.memory", "70g") \ 15 | .config("spark.driver.memory", "50g") \ 16 | .config("spark.memory.offHeap.enabled",True) \ 17 | .config("spark.memory.offHeap.size","16g") \ 18 | .getOrCreate() 19 | 20 | import pyspark.sql.functions as f 21 | import sparkdl as dl 22 | from pyspark.ml.image import ImageSchema 23 | 24 | dfbuses = ImageSchema.readImages('data/buses/').withColumn('label', f.lit(0)) 25 | dfcars = ImageSchema.readImages('data/cars/').withColumn('label', f.lit(1)) 26 | 27 | dfbuses.show(5) 28 | dfcars.show(5) 29 | 30 | trainDFbuses, testDFbuses = dfbuses.randomSplit([0.60,0.40], seed = 123) 31 | trainDFcars, testDFcars = dfcars.randomSplit([0.60,0.40], seed = 122) 32 | 33 | 34 | trainDF = trainDFbuses.unionAll(trainDFcars) 35 | testDF = testDFbuses.unionAll(testDFcars) 36 | 37 | from pyspark.ml.classification import LogisticRegression 38 | from pyspark.ml import Pipeline 39 | vectorizer = dl.DeepImageFeaturizer(inputCol="image", outputCol="features", 40 | modelName="InceptionV3") 41 | logreg = LogisticRegression(maxIter=30, labelCol="label") 42 | pipeline = Pipeline(stages=[vectorizer, logreg]) 43 | pipeline_model = pipeline.fit(trainDF) 44 | 45 | predictDF = pipeline_model.transform(testDF) 46 | predictDF.select('prediction', 'label').show(n = testDF.toPandas().shape[0], truncate=False) 47 | 48 | predictDF.crosstab('prediction', 'label').show() 49 | 50 | from pyspark.ml.evaluation import MulticlassClassificationEvaluator 51 | scoring = predictDF.select("prediction", "label") 52 | accuracy_score = MulticlassClassificationEvaluator(metricName="accuracy") 53 | rate = accuracy_score.evaluate(scoring)*100 54 | print("accuracy: {}%" .format(round(rate,2))) 55 | -------------------------------------------------------------------------------- /Chapter12/readme.md: -------------------------------------------------------------------------------- 1 | Running computationally expensive deep learning applications at scale can be an enormous challenge. With TensorFlowOnSpark, we can distribute these expensive computations across a cluster, enabling us to work at a much larger scale. In this chapter, we will explore Yahoo's TensorFlowOnSpark framework for distributed deep learning on Spark clusters, and we will apply it to a large-scale dataset of images to train a network that detects objects.
This chapter will cover: 2 | 3 | * The need for distributed AI 4 | * Introduction to the Apache Spark platform for big data 5 | * TensorFlowOnSpark, a Python framework for running TensorFlow on Spark clusters 6 | * Performing object detection using TensorFlowOnSpark and the Sparkdl API 7 | -------------------------------------------------------------------------------- /Chapter12/tensorflow_distributed_dl.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import tensorflow as tf 3 | # Add other module libraries you may need 4 | 5 | cluster = tf.train.ClusterSpec( 6 | {'ps':['192.168.1.3:2222'], 7 | 'worker': ['192.168.1.4:2222', 8 | '192.168.1.5:2222', 9 | '192.168.1.6:2222', 10 | '192.168.1.7:2222'] 11 | }) 12 | 13 | job = sys.argv[1] 14 | task_idx = sys.argv[2] 15 | 16 | server = tf.train.Server(cluster, job_name=job, task_index= int(task_idx)) 17 | 18 | if job == 'ps': 19 | # Makes the parameter server wait 20 | # until the Server shuts down 21 | server.join() 22 | else: 23 | # Executes only on worker machines 24 | with tf.device(tf.train.replica_device_setter(cluster=cluster, worker_device='/job:worker/task:'+task_idx)): 25 | # Build your model here as if you were working on a single machine 26 | print("In worker") 27 | 28 | with tf.Session(server.target): 29 | # Train the model 30 | print("Training") 31 | -------------------------------------------------------------------------------- /Chapter13/README.md: -------------------------------------------------------------------------------- 1 | # Generating Book Scripts using LSTMs 2 | This repo contains an implementation of book script generation using LSTMs. 3 | 4 | ### Dataset 5 | The dataset comes from the popular Packt book Mastering PostgreSQL by Hans-Jürgen Schönig. We used almost 100 pages from the book and removed any figures, tables and SQL code. 6 | 7 | ### Installations 8 | * This code has been tested with native Python 3 under Anaconda 9 | * Create a conda virtual environment and install the relevant packages from the requirements.txt file 10 | ``` 11 | pip install -r requirements.txt 12 | ``` 13 | ### Python Code Run Instructions 14 | To run the code, just execute 15 | ``` 16 | python main.py 17 | ``` 18 | As the dataset is not large, the code can be run on a CPU. 19 | 20 | ### Code Details 21 | The code is fairly self-explanatory. There are four main files in the implementation: 22 | * main.py -- Contains two parts: 23 | * main function: calls the relevant functions 24 | * train: trains the model 25 | * parameters.py -- Contains all the parameter declarations 26 | * model.py -- Contains the Model class. 27 | * utils.py -- Helper functions. 28 | 29 | Note that the model was not tuned for the best hyperparameters. Feel free to play around; a minimal generation-only usage sketch is shown below.
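### Generating a script from a saved model
The snippet below is a minimal sketch of how the trained model can be reused for generation only, assuming `main.py` has already been run once so that `processed_text.p`, `parameters.p` and the `./save` checkpoint exist; it simply calls the existing `predict_book_script` helper from utils.py.
```
# Minimal generation-only sketch: reuse the saved checkpoint to produce a new
# book script. Assumes processed_text.p, parameters.p and the ./save checkpoint
# were created by a previous run of main.py.
from utils import predict_book_script

if __name__ == "__main__":
    # Restores the saved graph, generates 250 words starting from "postgresql"
    # and writes the result to the "book_script" file.
    predict_book_script()
```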
30 | -------------------------------------------------------------------------------- /Chapter13/main.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This is the main file that is used to generate book scripts from the PostgreSQL book dataset 3 | ''' 4 | from utils import * 5 | from parameters import * 6 | from model import Model 7 | 8 | 9 | def train(model,int_text): 10 | 11 | # Creating the checkpoint directory 12 | if not os.path.exists(CHECKPOINT_PATH_DIR): 13 | os.makedirs(CHECKPOINT_PATH_DIR) 14 | 15 | batches = generate_batch_data(int_text) 16 | 17 | with tf.Session() as sess: 18 | if RESTORE_TRAINING: 19 | saver = tf.train.Saver() 20 | ckpt = tf.train.get_checkpoint_state(CHECKPOINT_PATH_DIR) 21 | saver.restore(sess, ckpt.model_checkpoint_path) 22 | print('Model Loaded') 23 | start_epoch = int(str(ckpt.model_checkpoint_path).split('-')[-1]) 24 | else: 25 | start_epoch = 0 26 | tf.global_variables_initializer().run() 27 | print('All variables initialized') 28 | 29 | for epoch in range(start_epoch, NUM_EPOCHS): 30 | saver = tf.train.Saver() 31 | state = sess.run(model.initial_state, {model.X: batches[0][0]}) 32 | 33 | for batch, (x, y) in enumerate(batches): 34 | feed = { 35 | model.X: x, 36 | model.Y: y, 37 | model.initial_state: state} 38 | train_loss, state, _ = sess.run([model.loss, model.final_state, model.train_op], feed) 39 | 40 | if (epoch * len(batches) + batch) % 200 == 0: 41 | print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format( 42 | epoch, 43 | batch, 44 | len(batches), 45 | train_loss)) 46 | # Save Checkpoint for restoring if required 47 | saver.save(sess, CHECKPOINT_PATH_DIR + '/model.tfmodel', global_step=epoch + 1) 48 | 49 | # Save Model 50 | saver.save(sess, SAVE_DIR) 51 | print('Model Trained and Saved') 52 | save_params((SEQ_LENGTH, SAVE_DIR)) 53 | 54 | 55 | 56 | def main(): 57 | if os.path.exists("./processed_text.p"): 58 | print ("Processed File Already Present.
Proceeding with that") 59 | else: 60 | print ("Preprocessing the data") 61 | preprocess_and_save_data() 62 | 63 | print ("Loading the preprocessed data") 64 | int_text, vocab_to_int, int_to_vocab, token_dict = load_preprocess_file() 65 | 66 | model = Model(int_to_vocab) 67 | print ("Training the model") 68 | train(model,int_text) 69 | 70 | print ("Generating the Book Script") 71 | predict_book_script() 72 | 73 | 74 | if __name__ == "__main__": 75 | main() -------------------------------------------------------------------------------- /Chapter13/model.py: -------------------------------------------------------------------------------- 1 | from parameters import * 2 | import tensorflow as tf 3 | from tensorflow.contrib import seq2seq 4 | 5 | class Model(): 6 | def __init__(self, int_to_vocab): 7 | self.vocab_size = len(int_to_vocab) 8 | 9 | with tf.variable_scope('Input'): 10 | self.X = tf.placeholder(tf.int32, [None, None], name='input') 11 | self.Y = tf.placeholder(tf.int32, [None, None], name='target') 12 | self.input_shape = tf.shape(self.X) 13 | 14 | self.define_network() 15 | self.define_loss() 16 | self.define_optimizer() 17 | 18 | def define_network(self): 19 | # Define an init cell of RNN 20 | with tf.variable_scope("Network"): 21 | # Defining an initial cell state 22 | lstm = tf.contrib.rnn.BasicLSTMCell(RNN_SIZE) 23 | cell = tf.contrib.rnn.MultiRNNCell([lstm] * 2) # Defining two LSTM layers for this case 24 | self.initial_state = cell.zero_state(self.input_shape[0], tf.float32) 25 | self.initial_state = tf.identity(self.initial_state, name="initial_state") 26 | 27 | embedding = tf.Variable(tf.random_uniform((self.vocab_size, RNN_SIZE), -1, 1)) 28 | embed = tf.nn.embedding_lookup(embedding, self.X) 29 | 30 | outputs, self.final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=None, dtype=tf.float32) 31 | self.final_state = tf.identity(self.final_state, name='final_state') 32 | self.predictions = tf.contrib.layers.fully_connected(outputs, self.vocab_size, activation_fn=None) 33 | # Probabilities for generating words 34 | probs = tf.nn.softmax(self.predictions, name='probs') 35 | 36 | def define_loss(self): 37 | # Defining the sequence loss 38 | with tf.variable_scope('Sequence_Loss'): 39 | self.loss = seq2seq.sequence_loss(self.predictions, self.Y, 40 | tf.ones([self.input_shape[0], self.input_shape[1]])) 41 | 42 | def define_optimizer(self): 43 | with tf.variable_scope("Optimizer"): 44 | optimizer = tf.train.AdamOptimizer(LEARNING_RATE) 45 | # Gradient Clipping 46 | gradients = optimizer.compute_gradients(self.loss) 47 | capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients] 48 | self.train_op = optimizer.apply_gradients(capped_gradients) -------------------------------------------------------------------------------- /Chapter13/parameters.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file contains all the parameters required for running the model 3 | ''' 4 | 5 | 6 | # Training Parameters 7 | NUM_EPOCHS = 500 8 | LEARNING_RATE = 0.001 # Learning Rate 9 | BATCH_SIZE = 128 # Batch Size 10 | CHECKPOINT_PATH_DIR = './model_dir' 11 | RESTORE_TRAINING=False 12 | SAVE_DIR = './save' 13 | 14 | # Network Parameters 15 | RNN_SIZE = 128 # RNN Size 16 | SEQ_LENGTH = 32 # Sequence Length 17 | 18 | # Data Parameters 19 | TEXT_SAVE_DIR= "./data/postgre_book.txt" 20 | -------------------------------------------------------------------------------- /Chapter13/requirements.txt: 
-------------------------------------------------------------------------------- 1 | absl-py==0.3.0 2 | astor==0.7.1 3 | atomicwrites==1.2.1 4 | attrs==18.2.0 5 | backports-abc==0.5 6 | backports.functools-lru-cache==1.5 7 | backports.weakref==1.0.post1 8 | bleach==1.5.0 9 | certifi==2018.10.15 10 | cloudpickle==0.6.1 11 | configparser==3.5.0 12 | cycler==0.10.0 13 | dask==0.19.4 14 | decorator==4.3.0 15 | enum34==1.1.6 16 | funcsigs==1.0.2 17 | functools32==3.2.3.post2 18 | futures==3.2.0 19 | gast==0.2.0 20 | graphviz==0.8 21 | grpcio==1.12.1 22 | h5py==2.8.0 23 | html5lib==0.9999999 24 | imageio==2.4.1 25 | Keras==2.2.4 26 | Keras-Applications==1.0.6 27 | Keras-Preprocessing==1.0.5 28 | kiwisolver==1.0.1 29 | linecache2==1.0.0 30 | Markdown==2.6.11 31 | matplotlib==2.2.2 32 | mkl-fft==1.0.4 33 | mkl-random==1.0.1 34 | mock==2.0.0 35 | more-itertools==4.3.0 36 | multipledispatch==0.6.0 37 | mxnet==0.11.0rc1 38 | networkx==2.2 39 | numpy==1.14.5 40 | olefile==0.45.1 41 | pandas==0.23.2 42 | pathlib2==2.3.2 43 | patsy==0.5.0 44 | pbr==4.2.0 45 | Pillow==5.3.0 46 | pluggy==0.7.1 47 | protobuf==3.6.1 48 | py==1.6.0 49 | pyparsing==2.2.0 50 | pytest==3.8.1 51 | python-dateutil==2.7.3 52 | pytz==2018.5 53 | PyWavelets==1.0.1 54 | PyYAML==3.13 55 | scandir==1.9.0 56 | scikit-image==0.14.0 57 | scikit-learn==0.20.0 58 | scipy==1.1.0 59 | seaborn==0.9.0 60 | singledispatch==3.4.0.3 61 | six==1.11.0 62 | statsmodels==0.9.0 63 | subprocess32==3.5.2 64 | tensorboard==1.11.0 65 | tensorflow==1.11.0 66 | tensorflow-probability==0.4.0 67 | termcolor==1.1.0 68 | toolz==0.9.0 69 | tornado==5.1 70 | tqdm==4.23.4 71 | traceback2==1.4.0 72 | unittest2==1.1.0 73 | Werkzeug==0.14.1 74 | wget==3.2 75 | -------------------------------------------------------------------------------- /Chapter13/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | from parameters import * 4 | import tensorflow as tf 5 | import numpy as np 6 | 7 | def load_data(): 8 | """ 9 | Loading Data 10 | """ 11 | input_file = os.path.join(TEXT_SAVE_DIR) 12 | with open(input_file, "r") as f: 13 | data = f.read() 14 | 15 | return data 16 | 17 | def preprocess_and_save_data(): 18 | """ 19 | Preprocessing the Book Scripts Dataset 20 | """ 21 | text = load_data() 22 | token_dict = define_tokens() 23 | for key, token in token_dict.items(): 24 | text = text.replace(key, ' {} '.format(token)) 25 | 26 | text = text.lower() 27 | text = text.split() 28 | 29 | vocab_to_int, int_to_vocab = create_map(text) 30 | int_text = [vocab_to_int[word] for word in text] 31 | pickle.dump((int_text, vocab_to_int, int_to_vocab, token_dict), open('processed_text.p', 'wb')) 32 | 33 | 34 | def load_preprocess_file(): 35 | """ 36 | Loading the processed Book Scripts Data 37 | """ 38 | return pickle.load(open('processed_text.p', mode='rb')) 39 | 40 | 41 | def save_params(params): 42 | """ 43 | Saving parameters to file 44 | """ 45 | pickle.dump(params, open('parameters.p', 'wb')) 46 | 47 | 48 | def load_params(): 49 | """ 50 | Loading parameters from file 51 | """ 52 | return pickle.load(open('parameters.p', mode='rb')) 53 | 54 | def create_map(input_text): 55 | """ 56 | Map words in vocab to int and vice versa for easy lookup 57 | :param input_text: Book Script data split into words 58 | :return: A tuple of dicts (vocab_to_int, int_to_vocab) 59 | """ 60 | vocab = set(input_text) 61 | vocab_to_int = {c: i for i, c in enumerate(vocab)} 62 | int_to_vocab = dict(enumerate(vocab)) 63 | return 
vocab_to_int, int_to_vocab 64 | 65 | def define_tokens(): 66 | """ 67 | Generate a dict to turn punctuation into a token. Note that Sym before each text denotes Symbol 68 | :return: Tokenize dictionary where the key is the punctuation and the value is the token 69 | """ 70 | dict = {'.':'_Sym_Period_', 71 | ',':'_Sym_Comma_', 72 | '"':'_Sym_Quote_', 73 | ';':'_Sym_Semicolon_', 74 | '!':'_Sym_Exclamation_', 75 | '?':'_Sym_Question_', 76 | '(':'_Sym_Left_Parentheses_', 77 | ')':'_Sym_Right_Parentheses_', 78 | '--':'_Sym_Dash_', 79 | '\n':'_Sym_Return_', 80 | } 81 | return dict 82 | 83 | def generate_batch_data(int_text): 84 | """ 85 | Generate batch data of x (inputs) and y (targets) 86 | :param int_text: Text with the words replaced by their ids 87 | :return: Batches as a Numpy array 88 | """ 89 | num_batches = len(int_text) // (BATCH_SIZE * SEQ_LENGTH) 90 | 91 | x = np.array(int_text[:num_batches * (BATCH_SIZE * SEQ_LENGTH)]) 92 | y = np.array(int_text[1:num_batches * (BATCH_SIZE * SEQ_LENGTH) + 1]) 93 | 94 | x_batches = np.split(x.reshape(BATCH_SIZE, -1), num_batches, 1) 95 | y_batches = np.split(y.reshape(BATCH_SIZE, -1), num_batches, 1) 96 | batches = np.array(list(zip(x_batches, y_batches))) 97 | return batches 98 | 99 | def extract_tensors(tf_graph): 100 | """ 101 | Get input, initial state, final state, and probabilities tensor from the graph 102 | :param tf_graph: TensorFlow graph loaded from file 103 | :return: Tuple (tensor_input,tensor_initial_state,tensor_final_state, tensor_probs) 104 | """ 105 | tensor_input = tf_graph.get_tensor_by_name("Input/input:0") 106 | tensor_initial_state = tf_graph.get_tensor_by_name("Network/initial_state:0") 107 | tensor_final_state = tf_graph.get_tensor_by_name("Network/final_state:0") 108 | tensor_probs = tf_graph.get_tensor_by_name("Network/probs:0") 109 | return tensor_input, tensor_initial_state, tensor_final_state, tensor_probs 110 | 111 | def select_next_word(probs, int_to_vocab): 112 | """ 113 | Select the next word for the generated text 114 | :param probs: list of probabilities of all the words in vocab which can be selected as next word 115 | :param int_to_vocab: Dictionary of word ids as the keys and words as the values 116 | :return: predicted next word 117 | """ 118 | index = np.argmax(probs) 119 | word = int_to_vocab[index] 120 | return word 121 | 122 | 123 | def predict_book_script(): 124 | _, vocab_to_int, int_to_vocab, token_dict = load_preprocess_file() 125 | seq_length, load_dir = load_params() 126 | 127 | script_length = 250 # Length of Book script to generate.
250 denotes 250 words 128 | 129 | first_word = 'postgresql' # postgresql or any other word from the book 130 | 131 | loaded_graph = tf.Graph() 132 | with tf.Session(graph=loaded_graph) as sess: 133 | # Load saved model 134 | loader = tf.train.import_meta_graph(load_dir + '.meta') 135 | loader.restore(sess, load_dir) 136 | 137 | # Get Tensors from loaded model 138 | input_text, initial_state, final_state, probs = extract_tensors(loaded_graph) 139 | 140 | # Sentences generation setup 141 | sentences = [first_word] 142 | previous_state = sess.run(initial_state, {input_text: np.array([[1]])}) 143 | # Generate sentences 144 | for i in range(script_length): 145 | # Dynamic Input 146 | dynamic_input = [[vocab_to_int[word] for word in sentences[-seq_length:]]] 147 | dynamic_seq_length = len(dynamic_input[0]) 148 | 149 | # Get Prediction 150 | probabilities, previous_state = sess.run([probs, final_state], {input_text: dynamic_input, initial_state: previous_state}) 151 | probabilities= np.squeeze(probabilities) 152 | 153 | pred_word = select_next_word(probabilities[dynamic_seq_length - 1], int_to_vocab) 154 | sentences.append(pred_word) 155 | 156 | # Scraping out tokens from the words 157 | book_script = ' '.join(sentences) 158 | for key, token in token_dict.items(): 159 | book_script = book_script.replace(' ' + token.lower(), key) 160 | book_script = book_script.replace('\n ', '\n') 161 | book_script = book_script.replace('( ', '(') 162 | 163 | # Write the generated script to a file 164 | with open("book_script", "w") as text_file: 165 | text_file.write(book_script) 166 | 167 | print(book_script) 168 | -------------------------------------------------------------------------------- /Chapter14/pacman-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/TensorFlow-Machine-Learning-Projects/157d13317780d845fd2c781a036c8281c780ab30/Chapter14/pacman-1.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Packt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | --------------------------------------------------------------------------------