├── ch12
│   ├── images
│   │   ├── Icon
│   │   ├── image.png
│   │   ├── 01_tensorboard_graph.png
│   │   ├── 01_tensorboard_image.png
│   │   ├── 01_tensorboard_scalars.png
│   │   ├── 01_tensorboard_histograms.png
│   │   ├── 02_tensorboard_hparams_coord.png
│   │   └── 02_tensorboard_hparams_table.png
│   ├── 03_Implementing_Unit_Tests
│   │   ├── Icon
│   │   ├── readme.md
│   │   └── 03_implementing_unit_tests.py
│   ├── 04_Using_Multiple_Devices
│   │   ├── Icon
│   │   ├── readme.md
│   │   └── 04_using_multiple_devices.ipynb
│   ├── 05_Parallelizing_TensorFlow
│   │   ├── Icon
│   │   ├── readme.md
│   │   └── 05_parallelizing_tensorflow.ipynb
│   ├── 01_Visualizing_Computational_Graphs
│   │   ├── Icon
│   │   ├── readme.md
│   │   └── 01_using_tensorboard.ipynb
│   ├── 02_Hyperparameter_tuning_with_HParams
│   │   ├── Icon
│   │   ├── readme.md
│   │   └── 02_Hyperparameter_tuning_with_HParams.ipynb
│   ├── 06_Saving_Restoring_TF_Model
│   │   ├── readme.md
│   │   └── 06_Saving_Restoring_TF_Model.ipynb
│   ├── readme.md
│   └── 07_Using_TensorFlow_Serving
│       └── 07_Using_TFX_Serving.ipynb
├── ch8
│   ├── 06_Deepdream
│   │   ├── Icon
│   │   └── readme.md
│   ├── 01_Intro_to_CNN
│   │   ├── Icon
│   │   └── readme.md
│   ├── 03_CNN_CIFAR10
│   │   ├── Icon
│   │   └── readme.md
│   ├── 02_Intro_to_CNN_MNIST
│   │   ├── Icon
│   │   └── readme.md
│   ├── 05_Stylenet_NeuralStyle
│   │   ├── Icon
│   │   └── readme.md
│   ├── 04_Retraining_Current_Architectures
│   │   ├── Icon
│   │   └── readme.md
│   ├── images
│   │   ├── image.png
│   │   ├── 01_intro_cnn.png
│   │   ├── 02_cnn1_acc.png
│   │   ├── 02_cnn1_loss.png
│   │   ├── book_cover.jpg
│   │   ├── starry_night.jpg
│   │   ├── 01_intro_cnn2.png
│   │   ├── 05_stylenet_ex.png
│   │   ├── 06_deepdream_ex.png
│   │   └── 02_cnn1_mnist_output.png
│   └── readme.md
├── .gitignore
├── ch6
│   ├── 06_Using_Multiple_Layers
│   │   ├── readme.md
│   │   └── birth_weight.csv
│   ├── 07_Improving_Linear_Regression
│   │   ├── readme.md
│   │   └── birth_weight.csv
│   ├── 05_Implementing_Different_Layers
│   │   ├── readme.md
│   │   └── 05_implementing_different_layers.ipynb
│   ├── 02_Implementing_an_Operational_Gate
│   │   ├── readme.md
│   │   └── 02_gates.ipynb
│   ├── 03_Working_with_Activation_Functions
│   │   └── readme.md
│   ├── images
│   │   ├── image.png
│   │   ├── 04_nn_layout.png
│   │   ├── 04_nn_loss.png
│   │   ├── 08_tictactoe_loss.png
│   │   ├── 08_tictactoe_layout.png
│   │   ├── 02_operational_gates.png
│   │   └── 08_tic_tac_toe_architecture.png
│   ├── 08_Learning_Tic_Tac_Toe
│   │   ├── base_tic_tac_toe_moves.csv
│   │   ├── readme.md
│   │   └── TicTacToe.ipynb
│   ├── 04_Single_Hidden_Layer_Network
│   │   └── readme.md
│   ├── readme.md
│   └── 01_Introduction
│       └── readme.md
├── ch10
│   └── data_sources_list.txt
├── ch9
│   └── data_sources_list.txt
├── README.md
├── LICENSE
├── birthweight.dat
└── ch3
    ├── 04-keras-subclassing_api.ipynb
    └── 02-keras-sequential_api.ipynb
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .ipynb_checkpoints/
3 | logs/
--------------------------------------------------------------------------------
/ch6/06_Using_Multiple_Layers/readme.md:
--------------------------------------------------------------------------------
1 | # Using Multiple Layers
2 |
3 | Placeholder for future purposes.
4 |
--------------------------------------------------------------------------------
/ch12/03_Implementing_Unit_Tests/readme.md:
--------------------------------------------------------------------------------
1 | # Implementing Unit Tests
2 |
3 | Placeholder for future purposes
4 |
--------------------------------------------------------------------------------
/ch12/04_Using_Multiple_Devices/readme.md:
--------------------------------------------------------------------------------
1 | # Using Multiple Devices
2 |
3 | Placeholder for future purposes
4 |
--------------------------------------------------------------------------------
/ch12/06_Saving_Restoring_TF_Model/readme.md:
--------------------------------------------------------------------------------
1 | # A Production Example
2 |
3 | Placeholder for future purposes
4 |
--------------------------------------------------------------------------------
/ch12/05_Parallelizing_TensorFlow/readme.md:
--------------------------------------------------------------------------------
1 | # Parallelizing TensorFlow
2 |
3 | Placeholder for future purposes
4 |
--------------------------------------------------------------------------------
/ch6/07_Improving_Linear_Regression/readme.md:
--------------------------------------------------------------------------------
1 | # Improving Linear Regression
2 |
3 | Placeholder for future purposes.
4 |
--------------------------------------------------------------------------------
/ch6/05_Implementing_Different_Layers/readme.md:
--------------------------------------------------------------------------------
1 | # Implementing Different Layers
2 |
3 | Placeholder for future purposes.
4 |
--------------------------------------------------------------------------------
/ch6/02_Implementing_an_Operational_Gate/readme.md:
--------------------------------------------------------------------------------
1 | # Implementing an Operational Gate
2 |
3 | Placeholder for future purposes.
4 |
--------------------------------------------------------------------------------
/ch6/03_Working_with_Activation_Functions/readme.md:
--------------------------------------------------------------------------------
1 | # Working with Activation Functions
2 |
3 | Placeholder for future purposes.
4 |
--------------------------------------------------------------------------------
/ch10/data_sources_list.txt:
--------------------------------------------------------------------------------
1 | chapter 10 data sources:
2 |
3 | sentiment analysis: https://www.kaggle.com/c/tweet-sentiment-extraction
4 |
--------------------------------------------------------------------------------
/ch12/02_Hyperparameter_tuning_with_HParams/readme.md:
--------------------------------------------------------------------------------
1 | # Hyperparameter tuning with HParams
2 |
3 | Placeholder for future purposes
4 |
--------------------------------------------------------------------------------
/ch12/images/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch12/images/image.png
--------------------------------------------------------------------------------
/ch6/images/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch6/images/image.png
--------------------------------------------------------------------------------
/ch8/images/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/image.png
--------------------------------------------------------------------------------
/ch12/01_Visualizing_Computational_Graphs/readme.md:
--------------------------------------------------------------------------------
1 | # Visualizing Computational Graphs (w/Tensorboard)
2 |
3 | Placeholder for future purposes
4 |
--------------------------------------------------------------------------------
/ch6/images/04_nn_layout.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch6/images/04_nn_layout.png
--------------------------------------------------------------------------------
/ch6/images/04_nn_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch6/images/04_nn_loss.png
--------------------------------------------------------------------------------
/ch8/images/01_intro_cnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/01_intro_cnn.png
--------------------------------------------------------------------------------
/ch8/images/02_cnn1_acc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/02_cnn1_acc.png
--------------------------------------------------------------------------------
/ch8/images/02_cnn1_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/02_cnn1_loss.png
--------------------------------------------------------------------------------
/ch8/images/book_cover.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/book_cover.jpg
--------------------------------------------------------------------------------
/ch8/images/starry_night.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/starry_night.jpg
--------------------------------------------------------------------------------
/ch8/images/01_intro_cnn2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/01_intro_cnn2.png
--------------------------------------------------------------------------------
/ch8/images/05_stylenet_ex.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/05_stylenet_ex.png
--------------------------------------------------------------------------------
/ch6/images/08_tictactoe_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch6/images/08_tictactoe_loss.png
--------------------------------------------------------------------------------
/ch8/images/06_deepdream_ex.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/06_deepdream_ex.png
--------------------------------------------------------------------------------
/ch6/images/08_tictactoe_layout.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch6/images/08_tictactoe_layout.png
--------------------------------------------------------------------------------
/ch12/images/01_tensorboard_graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch12/images/01_tensorboard_graph.png
--------------------------------------------------------------------------------
/ch12/images/01_tensorboard_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch12/images/01_tensorboard_image.png
--------------------------------------------------------------------------------
/ch6/images/02_operational_gates.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch6/images/02_operational_gates.png
--------------------------------------------------------------------------------
/ch8/images/02_cnn1_mnist_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch8/images/02_cnn1_mnist_output.png
--------------------------------------------------------------------------------
/ch12/images/01_tensorboard_scalars.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch12/images/01_tensorboard_scalars.png
--------------------------------------------------------------------------------
/ch12/images/01_tensorboard_histograms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch12/images/01_tensorboard_histograms.png
--------------------------------------------------------------------------------
/ch6/images/08_tic_tac_toe_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch6/images/08_tic_tac_toe_architecture.png
--------------------------------------------------------------------------------
/ch12/images/02_tensorboard_hparams_coord.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch12/images/02_tensorboard_hparams_coord.png
--------------------------------------------------------------------------------
/ch12/images/02_tensorboard_hparams_table.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Machine-Learning-Using-TensorFlow-Cookbook/HEAD/ch12/images/02_tensorboard_hparams_table.png
--------------------------------------------------------------------------------
/ch8/03_CNN_CIFAR10/readme.md:
--------------------------------------------------------------------------------
1 | CIFAR-10 CNN
2 | ============
3 |
4 | Here we will build a convolutional neural network to classify the CIFAR-10 images.
5 |
6 | The notebook provided will download the CIFAR-10 data and then train a CNN from scratch.
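A minimal sketch of how the data can be fetched with `tf.keras.datasets` (the normalization step is an assumption, not necessarily the notebook's exact preprocessing):

```python
import tensorflow as tf

# Download CIFAR-10: 50,000 training and 10,000 test images of shape (32, 32, 3).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

# Scale pixel values to [0, 1] before training (assumed preprocessing).
x_train, x_test = x_train / 255.0, x_test / 255.0
print(x_train.shape)  # (50000, 32, 32, 3)
```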
--------------------------------------------------------------------------------
/ch8/04_Retraining_Current_Architectures/readme.md:
--------------------------------------------------------------------------------
1 | Retraining (fine-tuning) Current CNN Architectures
2 | ==================================================
3 |
4 | The notebook provided in this section downloads the CIFAR-10 dataset and retrains the Google Inception model on it.
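As a rough illustration of the idea (not the notebook's exact code), fine-tuning a pre-trained Inception model with Keras can look like the sketch below; the input size and optimizer are assumptions, and CIFAR-10's 32x32 images must be resized to at least 75x75 before InceptionV3 will accept them:

```python
import tensorflow as tf

# Hypothetical fine-tuning sketch: reuse InceptionV3 (ImageNet weights) as a
# frozen feature extractor and train a new 10-class CIFAR-10 head on top.
base = tf.keras.applications.InceptionV3(include_top=False,
                                         weights="imagenet",
                                         input_shape=(96, 96, 3))
base.trainable = False  # freeze the pre-trained convolutional base

model = tf.keras.Sequential([
    base,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10, activation="softmax"),  # CIFAR-10 classes
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
```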
--------------------------------------------------------------------------------
/ch8/06_Deepdream/readme.md:
--------------------------------------------------------------------------------
1 | Deepdream in TensorFlow
2 | =======================
3 |
4 | This recipe is an adapted version of the official TensorFlow [Deep Dream tutorial](https://www.tensorflow.org/tutorials/generative/deepdream).
5 |
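The core idea, sketched below, is gradient *ascent* on the input image to amplify the activations of chosen layers; the loss definition and gradient normalization here follow the tutorial only loosely and are assumptions:

```python
import tensorflow as tf

def deepdream_step(model, image, step_size=0.01):
    # model: returns the activations of the layer(s) we want to amplify
    with tf.GradientTape() as tape:
        tape.watch(image)
        activations = model(image)
        loss = tf.reduce_mean(activations)   # how "excited" the chosen layer is
    gradients = tape.gradient(loss, image)
    gradients /= tf.math.reduce_std(gradients) + 1e-8  # normalize the step
    return image + gradients * step_size                # ascend, not descend
```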
6 | Here is a potential output.
7 |
8 | 
9 |
10 |
--------------------------------------------------------------------------------
/ch9/data_sources_list.txt:
--------------------------------------------------------------------------------
1 | Data sources for chapter 9:
2 |
3 | * stock price prediction: https://www.kaggle.com/mczielinski/bitcoin-historical-data
4 |
5 | * sentiment classification: https://www.kaggle.com/anindya2906/glove6b
6 | https://www.kaggle.com/kazanova/sentiment140
7 |
8 | * question answering: https://www.kaggle.com/c/tensorflow2-question-answering/data
9 |
10 |
--------------------------------------------------------------------------------
/ch8/05_Stylenet_NeuralStyle/readme.md:
--------------------------------------------------------------------------------
1 | Stylenet / Neural-Style
2 | =======================
3 |
4 | The purpose of this script is to illustrate how to implement Stylenet (neural style transfer) in TensorFlow.
5 | We reference the following [paper](https://arxiv.org/abs/1508.06576) for this algorithm.
6 |
7 | We use two images, a content image and a style image, and try to render the content image in the style of the style image.
8 |
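In the referenced paper, the style of an image is captured by Gram matrices of convolutional feature maps. A minimal sketch of that computation (layer selection and loss weighting are left to the notebook):

```python
import tensorflow as tf

def gram_matrix(features):
    # features: a float tensor of shape (height, width, channels) from a conv layer
    flat = tf.reshape(features, [-1, features.shape[-1]])  # (pixels, channels)
    return tf.matmul(flat, flat, transpose_a=True)         # (channels, channels)
```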
9 | 
10 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | # Machine Learning Using TensorFlow Cookbook
5 | Machine Learning Using TensorFlow Cookbook, published by Packt.
6 | ### Download a free PDF
7 |
8 | If you have already purchased a print or Kindle version of this book, you can get a DRM-free PDF version at no cost. Simply click on the following link to claim your free PDF.
9 |
https://packt.link/free-ebook/9781800208865
--------------------------------------------------------------------------------
/ch6/08_Learning_Tic_Tac_Toe/base_tic_tac_toe_moves.csv:
--------------------------------------------------------------------------------
1 | 0,0,0,0,-1,0,0,0,0,0
2 | 0,-1,0,0,0,0,0,0,0,0
3 | 0,0,0,0,0,-1,0,0,0,6
4 | -1,0,0,0,0,0,0,0,0,4
5 | 0,0,0,0,0,0,1,-1,-1,3
6 | 0,-1,0,0,1,0,0,0,-1,0
7 | 0,-1,1,0,0,-1,0,0,0,7
8 | -1,0,0,0,-1,0,0,0,1,6
9 | 0,0,1,0,0,-1,-1,0,0,4
10 | 0,0,-1,0,0,0,0,-1,1,4
11 | 1,0,0,-1,0,0,0,-1,0,2
12 | 0,0,-1,0,1,0,-1,0,0,5
13 | -1,0,0,1,-1,-1,0,0,1,6
14 | -1,1,-1,0,1,0,0,1,0,8
15 | 0,0,0,-1,0,1,1,-1,-1,1
16 | -1,1,0,0,0,-1,0,-1,1,3
17 | 0,-1,1,0,1,-1,-1,0,0,8
18 | 0,0,-1,1,0,-1,0,-1,1,0
19 | 1,-1,0,0,-1,0,0,0,0,7
20 | 1,0,-1,0,-1,0,0,0,0,6
21 | 1,0,0,0,-1,0,-1,0,0,2
22 | 1,0,0,0,-1,-1,0,0,0,3
23 | 1,0,0,0,-1,0,0,0,-1,6
24 | 1,-1,0,-1,-1,0,0,1,0,5
25 | 1,-1,0,0,-1,0,-1,1,0,2
26 | 1,-1,-1,0,-1,0,0,1,0,6
27 | 1,-1,0,0,-1,-1,0,1,0,3
28 | 1,0,-1,-1,-1,0,1,0,0,8
29 | 1,-1,1,0,-1,0,-1,0,0,7
30 | 1,0,0,1,-1,-1,-1,0,0,2
31 | 1,0,0,-1,-1,0,1,0,-1,5
32 |
--------------------------------------------------------------------------------
/ch8/02_Intro_to_CNN_MNIST/readme.md:
--------------------------------------------------------------------------------
1 | Introduction to CNN with MNIST
2 | ==============================
3 |
4 | Here we illustrate how to use a simple CNN with three convolutional units to predict the MNIST handwritten digits. There is good reason why this dataset is used as the 'hello world' of image recognition: it is fairly compact while having a decent amount of training, test, and validation data. It only has one channel (black and white) and only ten possible outputs (0-9).
5 |
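For orientation, a network of this kind can be sketched with the Keras Sequential API as follows; the filter counts and optimizer are illustrative assumptions, not necessarily the notebook's exact settings:

```python
import tensorflow as tf

# A small MNIST CNN with three convolutional units (hypothetical sizes).
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(64, 3, activation="relu"),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(64, 3, activation="relu"),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation="softmax"),  # one output per digit
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
```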
6 | When the script is done training the model, you should see output similar to the following graphs.
7 |
8 | 
9 |
10 | Training and test loss.
11 |
12 | 
13 |
14 | Training and test batch accuracy.
15 |
16 | 
17 |
18 | A random set of six digits with actual and predicted labels. You can see a prediction failure in the lower-right box.
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Packt
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/ch6/04_Single_Hidden_Layer_Network/readme.md:
--------------------------------------------------------------------------------
1 | # Implementing a One Layer Neural Network
2 |
3 | We will use the [Iris data](http://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html) for this exercise. We will build a fully connected neural network with one hidden layer to predict one of the flower attributes from the other three.
4 |
5 | The four flower attributes are (1) sepal length, (2) sepal width, (3) petal length, and (4) petal width. We will use (1-3) to predict (4). The main purpose of this section is to illustrate how neural networks can implement regression just as easily as classification. Later on in this chapter, we will extend this model to have multiple hidden layers.
6 |
7 | # Model
8 |
9 | The model will have one hidden layer. If the hidden layer has 10 nodes, then the model will look like the following:
10 |
11 | 
12 |
13 | We will use the ReLU activation function.
14 |
15 | For the loss function, we will use the average MSE across the batch.
16 |
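A minimal sketch of this network in Keras (the optimizer, batch size, and epoch count are assumptions, not the script's exact values):

```python
import tensorflow as tf

# 3 input features -> 10 hidden ReLU nodes -> 1 output (petal width),
# trained with batch-averaged MSE.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation="relu", input_shape=(3,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer="adam", loss="mse")
# model.fit(x_train, y_train, batch_size=50, epochs=200)
```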
17 | # Graph of Loss Function (Average Batch MSE)
18 |
19 | Running the script should result in a loss curve similar to the following.
20 |
21 | 
22 |
--------------------------------------------------------------------------------
/ch8/readme.md:
--------------------------------------------------------------------------------
1 | ## Ch 8: Convolutional Neural Networks
2 |
3 | 1. [Introduction](01_Intro_to_CNN#introduction-to-convolutional-neural-networks)
4 | * We introduce convolutional neural networks (CNNs) and how we can use them in TensorFlow.
5 | 2. [Implementing a Simple CNN.](02_Intro_to_CNN_MNIST#introduction-to-cnn-with-mnist)
6 | * Here, we show how to create a CNN architecture that performs well on the MNIST digit recognition task.
7 | 3. [Implementing an Advanced CNN.](03_CNN_CIFAR10#cifar-10-cnn)
8 | * In this example, we show how to replicate an architecture for the CIFAR-10 image recognition task.
9 | 4. [Retraining an Existing Architecture.](04_Retraining_Current_Architectures#retraining-fine-tuning-current-cnn-architectures)
10 | * We show how to re-train the Google Inception model to work on the CIFAR-10 dataset.
11 | 5. [Using Stylenet/NeuralStyle.](05_Stylenet_NeuralStyle#stylenet--neural-style)
12 | * In this recipe, we show a basic implementation of using Stylenet or Neuralstyle.
13 | 6. [Implementing Deep Dream.](06_Deepdream#deepdream-in-tensorflow)
14 | * This script shows a line-by-line explanation of TensorFlow's deepdream tutorial. Taken from [Deepdream on TensorFlow](https://www.tensorflow.org/tutorials/generative/deepdream).
15 |
--------------------------------------------------------------------------------
/ch12/readme.md:
--------------------------------------------------------------------------------
1 | ## Ch 12: Taking TensorFlow to Production
2 |
3 | 1. [Visualizing graphs in TensorBoard](01_Visualizing_Computational_Graphs#01_using_tensorboard)
4 | * How to visualize graphs and important values (loss, accuracy, batch training time, and so on) even during training.
5 | 2. [Managing Hyperparameter Tuning with TensorBoard’s HParams](02_Hyperparameter_tuning_with_HParams#02_Hyperparameter_tuning_with_HParams)
6 | * How HParams, a TensorBoard plugin, helps us find the best hyperparameters.
7 | 3. [Implementing unit tests using tf.test](03_Implementing_Unit_Tests#03_implementing_unit_tests)
8 | * How to implement tests using TensorFlow's testing framework.
9 | 4. [Using multiple executors](04_Using_Multiple_Devices#04_using_multiple_devices)
10 | * How to use a machine with multiple devices, e.g., a machine with a CPU and one or more GPUs.
11 | 5. [Parallelizing TensorFlow using tf.distribute.strategy](05_Parallelizing_TensorFlow#05_parallelizing_tensorflow)
12 | * How to use the mirrored strategy to speed up training (see the sketch after this list).
13 | 6. [Saving and restoring a TensorFlow model](06_Saving_Restoring_TF_Model#06_Saving_Restoring_TF_Model)
14 | * How to save and restore a TensorFlow model.
15 | 7. [Using TensorFlow Serving](07_Using_TensorFlow_Serving#07_Using_TFX_Serving)
16 | * How to deploy models using TensorFlow Serving, a TFX component.
17 |
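A minimal sketch of the mirrored strategy mentioned in recipe 5 (the model here is a placeholder, not the chapter's notebook code):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
print("Number of replicas:", strategy.num_replicas_in_sync)

with strategy.scope():
    # Variables created in this scope are mirrored across the available devices.
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(10,))])
    model.compile(optimizer="sgd", loss="mse")
```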
--------------------------------------------------------------------------------
/ch12/03_Implementing_Unit_Tests/03_implementing_unit_tests.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | import tensorflow as tf
5 | import numpy as np
6 |
7 | # Create a nested gate: $f(x) = a1 * x + b1$
8 | #
9 | #
10 | # a1 --
11 | # |
12 | # |-- (multiply)--
13 | # | |
14 | # x -- |-- (add) -->output
15 | # |
16 | # b1 --
17 | #
18 |
19 |
20 |
21 | class MyCustomGate(tf.keras.layers.Layer):
22 |
23 | def __init__(self, units, a1, b1):
24 | super(MyCustomGate, self).__init__()
25 | self.units = units
26 | self.a1 = a1
27 | self.b1 = b1
28 |
29 | # Compute f(x) = a1 * x + b1
30 | def call(self, inputs):
31 | return inputs * self.a1 + self.b1
32 |
33 |
34 |
35 | class MyCustomGateTest(tf.test.TestCase):
36 |
37 | def setUp(self):
38 | super(MyCustomGateTest, self).setUp()
39 | # Configure the layer with 1 unit, a1 = 2 and b1 = 1
40 | self.my_custom_gate = MyCustomGate(1,2,1)
41 |
42 | def testMyCustomGateOutput(self):
43 | input_x = np.array([[1,0,0,1],
44 | [1,0,0,1]])
45 | output = self.my_custom_gate(input_x)
46 | expected_output = np.array([[3,1,1,3], [3,1,1,3]])
47 |
48 | self.assertAllEqual(output, expected_output)
49 |
50 |
51 | if __name__ == "__main__":
52 |     tf.test.main()
53 |
54 |
--------------------------------------------------------------------------------
/ch8/01_Intro_to_CNN/readme.md:
--------------------------------------------------------------------------------
1 | ## Introduction to Convolutional Neural Networks
2 |
3 | Convolutional Neural Networks (CNNs) are responsible for the latest major breakthroughs in image recognition in the past few years.
4 |
5 | In mathematics, a convolution is a function which is applied over the output of another function. In our case, we will consider applying a matrix multiplication (filter) across an image. See the diagram below for an example of how this may work.
6 |
7 | 
8 |
9 | CNNs generally follow a structure. The main convolutional setup is (input array) -> (convolutional filter layer) -> (pooling) -> (activation layer). The above diagram depicts how a convolutional layer may create one feature. Generally, filters are multidimensional and end up creating many features. It is also common to have completely separate filters of different sizes acting on the same layer. After this convolutional filter, it is common to apply a pooling layer. This pooling may be max-pooling, average pooling, or another aggregation. One of the key concepts here is that the pooling layer has no parameters while it decreases the layer size. See the diagram below for an example of max-pooling.
10 |
11 | 
12 |
13 | After the max pooling, there is generally an activation layer. One of the more common activation layers is the ReLU (Rectified Linear Unit). See [Chapter 1, Section 6](../../01_Introduction/06_Implementing_Activation_Functions) for examples.
14 |
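As a hedged illustration of this structure, here is a minimal `tf.keras` sketch; the image size and filter count are arbitrary assumptions, not values taken from the recipes:

```python
import tensorflow as tf

# (input array) -> (convolutional filter layer) -> (pooling) -> (activation layer)
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, 3, input_shape=(28, 28, 1)),  # convolutional filter layer
    tf.keras.layers.MaxPool2D(2),   # pooling: no trainable parameters, smaller output
    tf.keras.layers.ReLU(),         # non-linear activation
])
model.summary()
```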
--------------------------------------------------------------------------------
/ch6/readme.md:
--------------------------------------------------------------------------------
1 | ## Ch 6: Neural Networks
2 |
3 | Neural networks are very important in machine learning and are growing in popularity due to major breakthroughs on previously unsolved problems. We start by introducing 'shallow' neural networks, which are very powerful and can help us improve our prior ML algorithm results. We begin with the very basic NN unit, the operational gate, gradually add more and more to the neural network, and end with training a model to play tic-tac-toe.
4 |
5 | 1. [Introduction](01_Introduction#neural-networks-introduction)
6 | * We introduce the concept of neural networks and how TensorFlow is built to easily handle these algorithms.
7 | 2. [Implementing Operational Gates](02_Implementing_an_Operational_Gate#implementing-an-operational-gate)
8 | * We implement an operational gate with one operation. Then we show how to extend this to multiple nested operations.
9 | 3. [Working with Gates and Activation Functions](03_Working_with_Activation_Functions#working-with-activation-functions)
10 | * Now we have to introduce activation functions on the gates. We show how different activation functions operate.
11 | 4. [Implementing a One Layer Neural Network](04_Single_Hidden_Layer_Network#implementing-a-one-layer-neural-network)
12 | * We have all the pieces to start implementing our first neural network. We do so here with regression on the Iris data set.
13 | 5. [Implementing Different Layers](05_Implementing_Different_Layers#implementing-different-layers)
14 | * This section introduces the convolution layer and the max-pool layer. We show how to chain these together in a 1D and 2D example with fully connected layers as well.
15 | 6. [Using Multi-layer Neural Networks](06_Using_Multiple_Layers#using-multiple-layers)
16 | * Here we show how to functionalize different layers and variables for a cleaner multi-layer neural network.
17 | 7. [Improving Predictions of Linear Models](07_Improving_Linear_Regression#improving-linear-regression)
18 | * We show how we can improve the convergence of our prior logistic regression with a set of hidden layers.
19 | 8. [Learning to Play Tic-Tac-Toe](08_Learning_Tic_Tac_Toe#learning-to-play-tic-tac-toe)
20 | * Given a set of tic-tac-toe boards and corresponding optimal moves, we train a neural network classification model to play. At the end of the script, we can attempt to play against the trained model.
21 |
--------------------------------------------------------------------------------
/ch6/08_Learning_Tic_Tac_Toe/readme.md:
--------------------------------------------------------------------------------
1 | # Learning to Play Tic-Tac-Toe
2 |
3 | ## Goal
4 |
5 | This example intends to feed examples of best moves for many different board combinations into a neural network in order to train the model to play Tic-Tac-Toe.
6 |
7 | The end of the script provides the user a chance to play against the trained model by asking for input moves and feeding such input moves into the model.
8 |
9 | ## Data Format
10 |
11 | All tic-tac-toe boards can be reduced down to a small number of boards, if we consider all geometric transformations on them. Such geometric transformations include:
12 |
13 | - Rotate 90 deg.
14 | - Rotate 180 deg.
15 | - Rotate 270 deg.
16 | - Vertical reflection.
17 | - Horizontal reflection.
18 |
19 | All possible boards can be generated from the base board with at most 2 transformations.
20 |
21 | The file base\_tic\_tac\_toe\_moves.csv contains rows, each of which represents a unique board representation with the desired best play tactic.
22 |
23 | We denote the board spaces as follows: 'X' = 1, 'O' = -1, and an empty space is 0. The last column is the index of the best-play response. A board is indexed as follows:
24 | ```
25 | 0 | 1 | 2
26 | ---------
27 | 3 | 4 | 5
28 | ---------
29 | 6 | 7 | 8
30 | ```
31 | So for example, the board:
32 | ```
33 | O | |
34 | ---------
35 | X | O | O
36 | ---------
37 | | | X
38 | ```
39 | is equivalent to the row: [-1, 0, 0, 1, -1, -1, 0, 0, 1].
40 |
41 | 
42 |
43 | ## Neural Network Architecture
44 |
45 | We will keep it simple and have only one fully connected hidden layer. The hidden layer will be composed of 81 hidden nodes, if only because square numbers are appealing. See the diagram below for the NN we will construct.
46 |
47 | 
48 |
49 | ## Important Functions
50 |
51 | There are a few important functions in the beginning of the code.
52 |
53 | 1. print_board(): takes a board vector and shows it as a tic-tac-toe board.
54 | 2. get_symmetry(): takes a board, the preferred response index, and a transformation. It then applies the transformation to the board and response to get a new vector.
55 |
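For illustration, `print_board()` might look roughly like this (a sketch, not the notebook's exact code):

```python
def print_board(board):
    # board: list of 9 values, with 'X' = 1, 'O' = -1, and 0 for an empty space
    symbols = {1: 'X', -1: 'O', 0: ' '}
    rows = [board[0:3], board[3:6], board[6:9]]
    print('\n---------\n'.join(' | '.join(symbols[v] for v in row) for row in rows))
```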
56 | At the end of the code, we loop through an actual game. This allows the user to play against the model they created.
57 |
58 | [See Code Here](TicTacToe.ipynb)
59 |
60 | ## Sample Game Output
61 |
62 | Here is a sample of the output of playing against the trained model. Human = X's and the model = O's.
63 |
64 | ```
65 | Input index of your move (0-8): 4
66 | Model has moved
67 | O | |
68 | ___________
69 | | X |
70 | ___________
71 | | |
72 |
73 | Input index of your move (0-8): 6
74 | Model has moved
75 | O | |
76 | ___________
77 | | X |
78 | ___________
79 | X | O |
80 |
81 | Input index of your move (0-8): 2
82 | Model has moved
83 | O | | X
84 | ___________
85 | | X | O
86 | ___________
87 | X | O |
88 | Congratulations, You won!
89 | ```
90 |
91 | ## Loss Output
92 |
93 | 
94 |
--------------------------------------------------------------------------------
/ch6/01_Introduction/readme.md:
--------------------------------------------------------------------------------
1 | # Neural Networks Introduction
2 |
3 | ------------------
4 |
5 | In this chapter, we will introduce neural networks and how to implement them in TensorFlow. Most of the subsequent chapters will be based on neural networks, so learning how to use them in TensorFlow is very important. We will start by introducing the basic concepts of neural networking before working up to multilayer networks. In the last section, we will create a neural network that will learn how to play Tic Tac Toe.
6 |
7 | In this chapter, we'll cover the following recipes:
8 |
9 | - Implementing operational gates
10 | - Working with gates and activation functions
11 | - Implementing a one-layer neural network
12 | - Implementing different layers
13 | - Using multilayer networks
14 | - Improving predictions of linear models
15 | - Learning to play Tic Tac Toe
16 |
17 | 
18 |
19 | ----------------
20 | Neural networks are currently breaking records in tasks such as image and speech recognition, reading handwriting, understanding text, image segmentation, dialog systems, autonomous car driving, and so much more. While some of these aforementioned tasks will be covered in later chapters, it is important to introduce neural networks as an easy-to-implement machine learning algorithm, so that we can expand on it later.
21 |
22 | The concept of a neural network has been around for decades. However, it has only recently gained traction because advances in processing power, algorithm efficiency, and data sizes have given us the computational power to train large networks.
23 |
24 | A neural network is basically a sequence of operations applied to a matrix of input data. These operations are usually collections of additions and multiplications followed by the application of non-linear functions. One example that we have already seen is logistic regression, which we looked at in Chapter 4, Linear Regression. Logistic regression is the sum of partial slope-feature products followed by the application of the sigmoid function, which is non-linear. Neural networks generalize this a bit more by allowing any combination of operations and non-linear functions, which includes the application of absolute value, maximum, minimum, and so on.
25 |
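A tiny sketch of that idea in TensorFlow, i.e. logistic regression as a one-unit network (the feature values are made up for illustration):

```python
import tensorflow as tf

x = tf.constant([[5.1, 3.5, 1.4]])          # one sample with three features
w = tf.Variable(tf.random.normal([3, 1]))   # slopes
b = tf.Variable(tf.zeros([1]))              # intercept
output = tf.sigmoid(tf.matmul(x, w) + b)    # non-linear function of the linear combination
```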
26 | The important trick with neural networks is called back propagation. Back propagation is a procedure that allows us to update model variables based on the learning rate and the output of the loss function. We used back propagation to update our model variables in Chapter 3, Keras, and Chapter 4, Linear Regression.
27 |
28 | Another important feature to take note of regarding neural networks is the non-linear activation function. Since most neural networks are just combinations of addition and multiplication operations, they will not be able to model non-linear data sets. To address this issue, we have used non-linear activation functions in our neural networks. This will allow the neural network to adapt to most non-linear situations.
29 |
30 | It is important to remember that, as we have seen in many of the algorithms covered, neural networks are sensitive to the hyper-parameters we choose. In this chapter, we will explore the impact of different learning rates, loss functions, and optimization procedures.
31 |
32 | There are many resources that cover neural networks in greater depth and detail. Here are some of them:
33 |
34 | - The seminal paper describing back propagation is Efficient Back Prop by Yann LeCun et al. The PDF is located here: http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
35 |
36 | - CS231, Convolutional Neural Networks for Visual Recognition, by Stanford University, class resources available here: http://cs231n.stanford.edu/
37 |
38 | - CS224d, Deep Learning for Natural Language Processing, by Stanford University, class resources available here: http://cs224d.stanford.edu/
39 |
40 | - Deep Learning, a book from MIT Press, by Goodfellow et al., 2016. Located here: http://www.deeplearningbook.org
41 |
42 | - There is an online book called Neural Networks and Deep Learning by Michael Nielsen, located here: http://neuralnetworksanddeeplearning.com/
43 |
44 | - For a more pragmatic approach and introduction to neural networks, Andrej Karpathy has written a great summary and JavaScript examples called A Hacker's Guide to Neural Networks. The write up is located here: http://karpathy.github.io/neuralnets/
45 |
46 | - Another site that summarizes some good notes on deep learning is called Deep Learning for Beginners by Ian Goodfellow, Yoshua Bengio, and Aaron Courville. This web page can be found here: http://randomekek.github.io/deep/deeplearning.html
47 |
--------------------------------------------------------------------------------
/birthweight.dat:
--------------------------------------------------------------------------------
1 | LOW AGE LWT RACE SMOKE PTL HT UI BWT
2 | 1 28 113 1 1 1 0 1 709
3 | 1 29 130 0 0 0 0 1 1021
4 | 1 34 187 1 1 0 1 0 1135
5 | 1 25 105 1 0 1 1 0 1330
6 | 1 25 85 1 0 0 0 1 1474
7 | 1 27 150 1 0 0 0 0 1588
8 | 1 23 97 1 0 0 0 1 1588
9 | 1 24 128 1 0 1 0 0 1701
10 | 1 24 132 1 0 0 1 0 1729
11 | 1 21 165 0 1 0 1 0 1790
12 | 1 32 105 1 1 0 0 0 1818
13 | 1 19 91 0 1 1 0 1 1885
14 | 1 25 115 1 0 0 0 0 1893
15 | 1 16 130 1 0 0 0 0 1899
16 | 1 25 92 0 1 0 0 0 1928
17 | 1 20 150 0 1 0 0 0 1928
18 | 1 21 190 1 0 0 0 1 1928
19 | 1 24 155 0 1 1 0 0 1936
20 | 1 21 103 1 0 0 0 0 1970
21 | 1 20 125 1 0 0 0 1 2055
22 | 1 25 89 1 0 1 0 0 2055
23 | 1 19 102 0 0 0 0 0 2082
24 | 1 19 112 0 1 0 0 1 2084
25 | 1 26 117 0 1 1 0 1 2084
26 | 1 24 138 0 0 0 0 0 2100
27 | 1 17 130 1 1 1 0 1 2125
28 | 1 20 120 1 1 0 0 0 2126
29 | 1 22 130 0 1 1 0 1 2187
30 | 1 27 130 1 0 0 0 1 2187
31 | 1 20 80 1 1 0 0 1 2211
32 | 1 17 110 0 1 0 0 0 2225
33 | 1 25 105 1 0 1 0 0 2240
34 | 1 20 109 1 0 0 0 0 2240
35 | 1 18 148 1 0 0 0 0 2282
36 | 1 18 110 1 1 1 0 0 2296
37 | 1 20 121 0 1 1 0 1 2296
38 | 1 21 100 1 0 1 0 0 2301
39 | 1 26 96 1 0 0 0 0 2325
40 | 1 31 102 0 1 1 0 0 2353
41 | 1 15 110 0 0 0 0 0 2353
42 | 1 23 187 1 1 0 0 0 2367
43 | 1 20 122 1 1 1 0 0 2381
44 | 1 24 105 1 1 0 0 0 2381
45 | 1 15 115 1 0 0 0 1 2381
46 | 1 23 120 1 0 0 0 0 2395
47 | 1 30 142 0 1 1 0 0 2410
48 | 1 22 130 0 1 0 0 0 2410
49 | 1 17 120 0 1 0 0 0 2414
50 | 1 23 110 0 1 1 0 0 2424
51 | 1 17 120 1 0 0 0 0 2438
52 | 1 26 154 1 0 1 1 0 2442
53 | 1 20 105 1 0 0 0 0 2450
54 | 1 26 168 0 1 0 0 0 2466
55 | 1 14 101 1 1 1 0 0 2466
56 | 1 28 95 0 1 0 0 0 2466
57 | 1 14 100 1 0 0 0 0 2495
58 | 1 23 94 1 1 0 0 0 2495
59 | 1 17 142 1 0 0 1 0 2495
60 | 1 21 130 0 1 0 1 0 2495
61 | 0 19 182 1 0 0 0 1 2523
62 | 0 33 155 1 0 0 0 0 2551
63 | 0 20 105 0 1 0 0 0 2557
64 | 0 21 108 0 1 0 0 1 2594
65 | 0 18 107 0 1 0 0 1 2600
66 | 0 21 124 1 0 0 0 0 2622
67 | 0 22 118 0 0 0 0 0 2637
68 | 0 17 103 1 0 0 0 0 2637
69 | 0 29 123 0 1 0 0 0 2663
70 | 0 26 113 0 1 0 0 0 2665
71 | 0 19 95 1 0 0 0 0 2722
72 | 0 19 150 1 0 0 0 0 2733
73 | 0 22 95 1 0 0 1 0 2750
74 | 0 30 107 1 0 1 0 1 2750
75 | 0 18 100 0 1 0 0 0 2769
76 | 0 18 100 1 1 0 0 0 2769
77 | 0 15 98 1 0 0 0 0 2778
78 | 0 25 118 0 1 0 0 0 2782
79 | 0 20 120 1 0 0 0 1 2807
80 | 0 28 120 0 1 0 0 0 2821
81 | 0 32 121 1 0 0 0 0 2835
82 | 0 31 100 0 0 0 0 1 2835
83 | 0 36 202 0 0 0 0 0 2836
84 | 0 28 120 1 0 0 0 0 2863
85 | 0 25 120 1 0 0 0 1 2877
86 | 0 28 167 0 0 0 0 0 2877
87 | 0 17 122 0 1 0 0 0 2906
88 | 0 29 150 0 0 0 0 0 2920
89 | 0 26 168 1 1 0 0 0 2920
90 | 0 17 113 1 0 0 0 0 2920
91 | 0 17 113 1 0 0 0 0 2920
92 | 0 24 90 0 1 1 0 0 2948
93 | 0 35 121 1 1 1 1 0 2948
94 | 0 25 155 0 1 1 0 0 2977
95 | 0 25 125 1 0 0 0 0 2977
96 | 0 29 140 0 1 0 0 0 2977
97 | 0 19 138 0 1 0 1 0 2977
98 | 0 27 124 0 1 0 0 0 2992
99 | 0 31 115 0 1 0 0 0 3005
100 | 0 33 109 0 1 0 0 0 3033
101 | 0 21 185 1 1 0 0 0 3042
102 | 0 19 189 0 0 0 0 0 3062
103 | 0 23 130 1 0 0 0 0 3062
104 | 0 21 160 0 0 0 0 0 3062
105 | 0 18 90 0 1 0 0 1 3076
106 | 0 18 90 0 1 0 0 1 3076
107 | 0 32 132 0 0 0 0 0 3080
108 | 0 19 132 1 0 0 0 0 3090
109 | 0 24 115 0 0 0 0 0 3090
110 | 0 22 85 1 1 0 0 0 3090
111 | 0 22 120 0 0 0 1 0 3100
112 | 0 23 128 1 0 0 0 0 3104
113 | 0 22 130 0 1 0 0 0 3132
114 | 0 30 95 0 1 0 0 0 3147
115 | 0 19 115 1 0 0 0 0 3175
116 | 0 16 110 1 0 0 0 0 3175
117 | 0 21 110 1 1 0 0 1 3203
118 | 0 30 153 1 0 0 0 0 3203
119 | 0 20 103 1 0 0 0 0 3203
120 | 0 17 119 1 0 0 0 0 3225
121 | 0 17 119 1 0 0 0 0 3225
122 | 0 23 119 1 0 0 0 0 3232
123 | 0 24 110 1 0 0 0 0 3232
124 | 0 28 140 0 0 0 0 0 3234
125 | 0 26 133 1 1 0 0 0 3260
126 | 0 20 169 1 0 1 0 1 3274
127 | 0 24 115 1 0 0 0 0 3274
128 | 0 28 250 1 1 0 0 0 3303
129 | 0 20 141 0 0 0 0 1 3317
130 | 0 22 158 1 0 1 0 0 3317
131 | 0 22 112 0 1 1 0 0 3317
132 | 0 31 150 1 1 0 0 0 3321
133 | 0 23 115 1 1 0 0 0 3331
134 | 0 16 112 1 0 0 0 0 3374
135 | 0 16 135 0 1 0 0 0 3374
136 | 0 18 229 1 0 0 0 0 3402
137 | 0 25 140 0 0 0 0 0 3416
138 | 0 32 134 0 1 1 0 0 3430
139 | 0 20 121 1 1 0 0 0 3444
140 | 0 23 190 0 0 0 0 0 3459
141 | 0 22 131 0 0 0 0 0 3460
142 | 0 32 170 0 0 0 0 0 3473
143 | 0 30 110 1 0 0 0 0 3475
144 | 0 20 127 1 0 0 0 0 3487
145 | 0 23 123 1 0 0 0 0 3544
146 | 0 17 120 1 1 0 0 0 3572
147 | 0 19 105 1 0 0 0 0 3572
148 | 0 23 130 0 0 0 0 0 3586
149 | 0 36 175 0 0 0 0 0 3600
150 | 0 22 125 0 0 0 0 0 3614
151 | 0 24 133 0 0 0 0 0 3614
152 | 0 21 134 1 0 0 0 0 3629
153 | 0 19 235 0 1 0 1 0 3629
154 | 0 25 200 0 0 1 0 1 3637
155 | 0 16 135 0 1 0 0 0 3643
156 | 0 29 135 0 0 0 0 0 3651
157 | 0 29 154 0 0 0 0 0 3651
158 | 0 19 147 0 1 0 0 0 3651
159 | 0 19 147 0 1 0 0 0 3651
160 | 0 30 137 0 0 0 0 0 3699
161 | 0 24 110 0 0 0 0 0 3728
162 | 0 19 184 0 1 0 1 0 3756
163 | 0 24 110 0 0 1 0 0 3770
164 | 0 23 110 0 0 0 0 0 3770
165 | 0 20 120 1 0 0 0 0 3770
166 | 0 25 141 0 0 0 1 0 3790
167 | 0 30 112 0 0 0 0 0 3799
168 | 0 22 169 0 0 0 0 0 3827
169 | 0 18 120 0 1 0 0 0 3856
170 | 0 16 170 1 0 0 0 0 3860
171 | 0 32 186 0 0 0 0 0 3860
172 | 0 18 120 1 0 0 0 0 3884
173 | 0 29 130 0 1 0 0 0 3884
174 | 0 33 117 0 0 0 0 1 3912
175 | 0 20 170 0 1 0 0 0 3940
176 | 0 28 134 1 0 0 0 0 3941
177 | 0 14 135 0 0 1 0 0 3941
178 | 0 28 130 1 0 0 0 0 3969
179 | 0 25 120 0 0 0 0 0 3983
180 | 0 16 135 1 0 0 0 0 3997
181 | 0 20 158 0 0 0 0 0 3997
182 | 0 26 160 0 0 0 0 0 4054
183 | 0 21 115 0 0 0 0 0 4054
184 | 0 22 129 0 0 0 0 0 4111
185 | 0 25 130 0 0 0 0 0 4153
186 | 0 31 120 0 0 0 0 0 4167
187 | 0 35 170 0 0 1 0 0 4174
188 | 0 19 120 0 1 0 1 0 4238
189 | 0 24 216 0 0 0 0 0 4593
190 | 0 45 123 0 0 1 0 0 4990
--------------------------------------------------------------------------------
/ch12/06_Saving_Restoring_TF_Model/06_Saving_Restoring_TF_Model.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "d1y_QQbCQOiZ"
7 | },
8 | "source": [
9 | "# Saving and restoring a TensorFlow model\n",
10 | "\n",
11 | "We start by loading the necessary libraries."
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {
18 | "id": "GDbv2ObtQOib"
19 | },
20 | "outputs": [],
21 | "source": [
22 | "import tensorflow as tf"
23 | ]
24 | },
25 | {
26 | "cell_type": "markdown",
27 | "metadata": {
28 | "id": "DNjUAPjdQOie"
29 | },
30 | "source": [
31 | "Then, we’ll build a MNIST model using the Keras Sequential API."
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": null,
37 | "metadata": {
38 | "id": "i8e0atM5QOif",
39 | "outputId": "b4cfdcb3-125b-4874-df3e-299d7de6b8e3"
40 | },
41 | "outputs": [],
42 | "source": [
43 | "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
44 | "\n",
45 | "# Normalize\n",
46 | "x_train = x_train / 255\n",
47 | "x_test = x_test/ 255\n",
48 | "\n",
49 | "model = tf.keras.Sequential()\n",
50 | "model.add(tf.keras.layers.Flatten(name=\"FLATTEN\"))\n",
51 | "model.add(tf.keras.layers.Dense(units=128 , activation=\"relu\", name=\"D1\"))\n",
52 | "model.add(tf.keras.layers.Dense(units=64 , activation=\"relu\", name=\"D2\"))\n",
53 | "model.add(tf.keras.layers.Dense(units=10, activation=\"softmax\", name=\"OUTPUT\"))\n",
54 | " \n",
55 | "model.compile(optimizer=\"sgd\", \n",
56 | " loss=\"sparse_categorical_crossentropy\",\n",
57 | " metrics=[\"accuracy\"]\n",
58 | " )\n",
59 | "\n",
60 | "model.fit(x=x_train, \n",
61 | " y=y_train, \n",
62 | " epochs=5,\n",
63 | " validation_data=(x_test, y_test)\n",
64 | " ) "
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {
70 | "id": "ACtX0msHQOij"
71 | },
72 | "source": [
73 | "## Save and restore an entire model as SavedModel format\n",
74 | "\n",
75 | "The SavedModel is the recommended format for save an entire model to disk."
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {
82 | "id": "B8ieDgFTQOij",
83 | "outputId": "9aeaf6a0-98d3-4fb9-ae26-cf51e8bb83ae"
84 | },
85 | "outputs": [],
86 | "source": [
87 | "model.save(\"SavedModel\")"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "metadata": {
94 | "id": "LY3rgSU1QOil"
95 | },
96 | "outputs": [],
97 | "source": [
98 | "model2 = tf.keras.models.load_model(\"SavedModel\")"
99 | ]
100 | },
101 | {
102 | "cell_type": "markdown",
103 | "metadata": {
104 | "id": "CtOoiDkqQOio"
105 | },
106 | "source": [
107 | "## Save and restore an entire model as Keras H5 format\n",
108 | "\n",
109 | "We can either passing a filename that ends in `.h5` or adding the `save_format=\"h5\"` argument."
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {
116 | "id": "Bpa0IxzDQOio"
117 | },
118 | "outputs": [],
119 | "source": [
120 | "model.save(\"SavedModel.h5\")"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {
127 | "id": "CufRJurkQOiq"
128 | },
129 | "outputs": [],
130 | "source": [
131 | "model.save(\"model_save\", save_format=\"h5\")"
132 | ]
133 | },
134 | {
135 | "cell_type": "markdown",
136 | "metadata": {
137 | "id": "patsL-mMQOit"
138 | },
139 | "source": [
140 | "## Save and restore weights a TensorFlow Checkpoint\n",
141 | "\n",
142 | "We can also use a `ModelCheckpoint` callback in order to save an entire model or just the weights into a checkpoint structure.\n",
143 | "This callback is added to the callback argument in the `fit` method.\n",
144 | "\n",
145 | "The model weights will be stored every epoch."
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": null,
151 | "metadata": {
152 | "id": "6cVSSvYFQOit"
153 | },
154 | "outputs": [],
155 | "source": [
156 | "checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=\"./checkpoint\",\n",
157 | " save_weights_only=True,\n",
158 | " save_freq='epoch')"
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": null,
164 | "metadata": {
165 | "id": "nQDlRIKLQOiv",
166 | "outputId": "d57e2413-1c7e-40d4-e820-99165acdd8d4"
167 | },
168 | "outputs": [],
169 | "source": [
170 | "\n",
171 | "model.fit(x=x_train, \n",
172 | " y=y_train, \n",
173 | " epochs=5,\n",
174 | " validation_data=(x_test, y_test),\n",
175 | " callbacks=[checkpoint_callback]\n",
176 | " ) "
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": null,
182 | "metadata": {
183 | "id": "gRh_nF1mQOiy"
184 | },
185 | "outputs": [],
186 | "source": [
187 | "model.load_weights(\"./checkpoint\")"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "metadata": {},
194 | "outputs": [],
195 | "source": []
196 | }
197 | ],
198 | "metadata": {
199 | "colab": {
200 | "name": "06_Saving_Restoring_TF_Model.ipynb",
201 | "provenance": []
202 | },
203 | "kernelspec": {
204 | "display_name": "Python 3",
205 | "language": "python",
206 | "name": "python3"
207 | },
208 | "language_info": {
209 | "codemirror_mode": {
210 | "name": "ipython",
211 | "version": 3
212 | },
213 | "file_extension": ".py",
214 | "mimetype": "text/x-python",
215 | "name": "python",
216 | "nbconvert_exporter": "python",
217 | "pygments_lexer": "ipython3",
218 | "version": "3.7.4"
219 | }
220 | },
221 | "nbformat": 4,
222 | "nbformat_minor": 4
223 | }
224 |
--------------------------------------------------------------------------------
/ch12/07_Using_TensorFlow_Serving/07_Using_TFX_Serving.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Using TensorFlow Extended for serving the model\n",
8 | "\n",
9 | "We start by loading the necessary libraries."
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "import tensorflow as tf\n",
19 | "import numpy as np\n",
20 | "import requests\n",
21 | "import matplotlib.pyplot as plt\n",
22 | "import json"
23 | ]
24 | },
25 | {
26 | "cell_type": "markdown",
27 | "metadata": {},
28 | "source": [
29 | "## Build a model\n",
30 | "We’ll build a MNIST model using the Keras Sequential API."
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
40 | "\n",
41 | "# Normalize\n",
42 | "x_train = x_train / 255\n",
43 | "x_test = x_test/ 255\n",
44 | "\n",
45 | "model = tf.keras.Sequential()\n",
46 | "model.add(tf.keras.layers.Flatten(name=\"FLATTEN\"))\n",
47 | "model.add(tf.keras.layers.Dense(units=128 , activation=\"relu\", name=\"D1\"))\n",
48 | "model.add(tf.keras.layers.Dense(units=64 , activation=\"relu\", name=\"D2\"))\n",
49 | "model.add(tf.keras.layers.Dense(units=10, activation=\"softmax\", name=\"OUTPUT\"))\n",
50 | " \n",
51 | "model.compile(optimizer=\"sgd\", \n",
52 | " loss=\"sparse_categorical_crossentropy\",\n",
53 | " metrics=[\"accuracy\"]\n",
54 | " )\n",
55 | "\n",
56 | "model.fit(x=x_train, \n",
57 | " y=y_train, \n",
58 | " epochs=5,\n",
59 | " validation_data=(x_test, y_test)\n",
60 | " ) "
61 | ]
62 | },
63 | {
64 | "cell_type": "markdown",
65 | "metadata": {},
66 | "source": [
67 | "## Save the entire model as SavedModel format\n",
68 | "\n",
69 | "Then, we will save our model as SavedModel format and create a directory for each version of our model."
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": null,
75 | "metadata": {},
76 | "outputs": [],
77 | "source": [
78 | "# Build a model directory\n",
79 | "!mkdir \"my_mnist_model\"\n",
80 | "\n",
81 | "# Build a directory with the version 1\n",
82 | "!mkdir \"my_mnist_model/1\""
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "metadata": {},
89 | "outputs": [],
90 | "source": [
91 | "# Save the model\n",
92 | "model.save(\"my_mnist_model/1\")"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "metadata": {},
98 | "source": [
99 | "## Download the TensorFlow Serving docker image\n",
100 | "\n",
101 | "We'll install TensorFlow Serving by using Docker.\n",
102 | "\n",
103 | "We encourage reader to visit the official Docker documentation to get Docker installation instructions if needed.\n",
104 | "\n",
105 | "The first step is to pull the latest TensorFlow Serving docker image."
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "!docker pull tensorflow/serving"
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "metadata": {},
120 | "source": [
121 | "Now, we'll start a Docker container :\n",
122 | "- publish the REST API port 8501 to our host's port 8501\n",
123 | "- take the previous model `my_mnist_model`\n",
124 | "- bound it to the model base path `/models/my_mnist_model`\n",
125 | "- fill in the environment variable MODEL_NAME with `my_mnist_model`.\n",
126 | "\n",
127 | "\n",
128 | "_Note that this commands should be executed from the command line bash prompt!_\n",
129 | "\n",
130 | "`docker run -p 8501:8501 \\\n",
131 | " --mount type=bind,source=\"$(pwd)/my_mnist_model/\",target=/models/my_mnist_model \\\n",
132 | " -e MODEL_NAME=my_mnist_model -t tensorflow/serving`\n"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "metadata": {},
138 | "source": [
139 | "## Display the images to predict"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": null,
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "num_rows = 4\n",
149 | "num_cols = 3\n",
150 | "plt.figure(figsize=(2*2*num_cols, 2*num_rows))\n",
151 | "for row in range(num_rows):\n",
152 | " for col in range(num_cols):\n",
153 | " index = num_cols * row + col\n",
154 | " image = x_test[index]\n",
155 | " true_label = y_test[index]\n",
156 | " plt.subplot(num_rows, 2*num_cols, 2*index+1)\n",
157 | " plt.imshow(image.reshape(28,28), cmap=\"binary\")\n",
158 | " plt.axis('off')\n",
159 | " plt.title('\\n\\n It is a {}'.format(y_test[index]), fontdict={'size': 16})\n",
160 | "plt.tight_layout()\n",
161 | "plt.show()"
162 | ]
163 | },
164 | {
165 | "cell_type": "markdown",
166 | "metadata": {},
167 | "source": [
168 | "## Send POST predict requests to our TensorFlow Serving\n",
169 | "\n",
170 | "We'll send POST predict requests to our server and pass the five images.\n",
171 | "\n",
172 | "The server will return for each image ten probabilities corresponding to the probability for each digit between 0 and 9. "
173 | ]
174 | },
175 | {
176 | "cell_type": "code",
177 | "execution_count": null,
178 | "metadata": {},
179 | "outputs": [],
180 | "source": [
181 | "json_request = '{{ \"instances\" : {} }}'.format(x_test[0:12].tolist())\n",
182 | "resp = requests.post('http://localhost:8501/v1/models/my_mnist_model:predict', data=json_request, headers = {\"content-type\": \"application/json\"})\n",
183 | "print('response.status_code: {}'.format(resp.status_code)) \n",
184 | "print('response.content: {}'.format(resp.content))\n",
185 | "predictions = json.loads(resp.text)['predictions']"
186 | ]
187 | },
188 | {
189 | "cell_type": "markdown",
190 | "metadata": {},
191 | "source": [
192 | "Then, we will display the results."
193 | ]
194 | },
195 | {
196 | "cell_type": "code",
197 | "execution_count": null,
198 | "metadata": {},
199 | "outputs": [],
200 | "source": [
201 | "num_rows = 4\n",
202 | "num_cols = 3\n",
203 | "plt.figure(figsize=(2*2*num_cols, 2*num_rows))\n",
204 | "for row in range(num_rows):\n",
205 | " for col in range(num_cols):\n",
206 | " index = num_cols * row + col\n",
207 | " image = x_test[index]\n",
208 | " predicted_label = np.argmax(predictions[index])\n",
209 | " true_label = y_test[index]\n",
210 | " plt.subplot(num_rows, 2*num_cols, 2*index+1)\n",
211 | " plt.imshow(image.reshape(28,28), cmap=\"binary\")\n",
212 | " plt.axis('off')\n",
213 | " if predicted_label == true_label:\n",
214 | " color = 'blue'\n",
215 | " else:\n",
216 | " color = 'red'\n",
217 | " plt.title('\\n\\n The model predicts a {} \\n and it is a {}'.format(predicted_label, true_label), fontdict={'size': 16}, color=color)\n",
218 | "plt.tight_layout()\n",
219 | "plt.show()"
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {},
226 | "outputs": [],
227 | "source": []
228 | }
229 | ],
230 | "metadata": {
231 | "kernelspec": {
232 | "display_name": "Python 3",
233 | "language": "python",
234 | "name": "python3"
235 | },
236 | "language_info": {
237 | "codemirror_mode": {
238 | "name": "ipython",
239 | "version": 3
240 | },
241 | "file_extension": ".py",
242 | "mimetype": "text/x-python",
243 | "name": "python",
244 | "nbconvert_exporter": "python",
245 | "pygments_lexer": "ipython3",
246 | "version": "3.7.4"
247 | }
248 | },
249 | "nbformat": 4,
250 | "nbformat_minor": 4
251 | }
252 |
--------------------------------------------------------------------------------
/ch6/07_Improving_Linear_Regression/birth_weight.csv:
--------------------------------------------------------------------------------
1 | 1.0,28.0,113.0,1.0,1.0,1.0,0.0,1.0,709.0
2 | 1.0,29.0,130.0,0.0,0.0,0.0,0.0,1.0,1021.0
3 | 1.0,34.0,187.0,1.0,1.0,0.0,1.0,0.0,1135.0
4 | 1.0,25.0,105.0,1.0,0.0,1.0,1.0,0.0,1330.0
5 | 1.0,25.0,85.0,1.0,0.0,0.0,0.0,1.0,1474.0
6 | 1.0,27.0,150.0,1.0,0.0,0.0,0.0,0.0,1588.0
7 | 1.0,23.0,97.0,1.0,0.0,0.0,0.0,1.0,1588.0
8 | 1.0,24.0,128.0,1.0,0.0,1.0,0.0,0.0,1701.0
9 | 1.0,24.0,132.0,1.0,0.0,0.0,1.0,0.0,1729.0
10 | 1.0,21.0,165.0,0.0,1.0,0.0,1.0,0.0,1790.0
11 | 1.0,32.0,105.0,1.0,1.0,0.0,0.0,0.0,1818.0
12 | 1.0,19.0,91.0,0.0,1.0,1.0,0.0,1.0,1885.0
13 | 1.0,25.0,115.0,1.0,0.0,0.0,0.0,0.0,1893.0
14 | 1.0,16.0,130.0,1.0,0.0,0.0,0.0,0.0,1899.0
15 | 1.0,25.0,92.0,0.0,1.0,0.0,0.0,0.0,1928.0
16 | 1.0,20.0,150.0,0.0,1.0,0.0,0.0,0.0,1928.0
17 | 1.0,21.0,190.0,1.0,0.0,0.0,0.0,1.0,1928.0
18 | 1.0,24.0,155.0,0.0,1.0,1.0,0.0,0.0,1936.0
19 | 1.0,21.0,103.0,1.0,0.0,0.0,0.0,0.0,1970.0
20 | 1.0,20.0,125.0,1.0,0.0,0.0,0.0,1.0,2055.0
21 | 1.0,25.0,89.0,1.0,0.0,1.0,0.0,0.0,2055.0
22 | 1.0,19.0,102.0,0.0,0.0,0.0,0.0,0.0,2082.0
23 | 1.0,19.0,112.0,0.0,1.0,0.0,0.0,1.0,2084.0
24 | 1.0,26.0,117.0,0.0,1.0,1.0,0.0,1.0,2084.0
25 | 1.0,24.0,138.0,0.0,0.0,0.0,0.0,0.0,2100.0
26 | 1.0,17.0,130.0,1.0,1.0,1.0,0.0,1.0,2125.0
27 | 1.0,20.0,120.0,1.0,1.0,0.0,0.0,0.0,2126.0
28 | 1.0,22.0,130.0,0.0,1.0,1.0,0.0,1.0,2187.0
29 | 1.0,27.0,130.0,1.0,0.0,0.0,0.0,1.0,2187.0
30 | 1.0,20.0,80.0,1.0,1.0,0.0,0.0,1.0,2211.0
31 | 1.0,17.0,110.0,0.0,1.0,0.0,0.0,0.0,2225.0
32 | 1.0,25.0,105.0,1.0,0.0,1.0,0.0,0.0,2240.0
33 | 1.0,20.0,109.0,1.0,0.0,0.0,0.0,0.0,2240.0
34 | 1.0,18.0,148.0,1.0,0.0,0.0,0.0,0.0,2282.0
35 | 1.0,18.0,110.0,1.0,1.0,1.0,0.0,0.0,2296.0
36 | 1.0,20.0,121.0,0.0,1.0,1.0,0.0,1.0,2296.0
37 | 1.0,21.0,100.0,1.0,0.0,1.0,0.0,0.0,2301.0
38 | 1.0,26.0,96.0,1.0,0.0,0.0,0.0,0.0,2325.0
39 | 1.0,31.0,102.0,0.0,1.0,1.0,0.0,0.0,2353.0
40 | 1.0,15.0,110.0,0.0,0.0,0.0,0.0,0.0,2353.0
41 | 1.0,23.0,187.0,1.0,1.0,0.0,0.0,0.0,2367.0
42 | 1.0,20.0,122.0,1.0,1.0,1.0,0.0,0.0,2381.0
43 | 1.0,24.0,105.0,1.0,1.0,0.0,0.0,0.0,2381.0
44 | 1.0,15.0,115.0,1.0,0.0,0.0,0.0,1.0,2381.0
45 | 1.0,23.0,120.0,1.0,0.0,0.0,0.0,0.0,2395.0
46 | 1.0,30.0,142.0,0.0,1.0,1.0,0.0,0.0,2410.0
47 | 1.0,22.0,130.0,0.0,1.0,0.0,0.0,0.0,2410.0
48 | 1.0,17.0,120.0,0.0,1.0,0.0,0.0,0.0,2414.0
49 | 1.0,23.0,110.0,0.0,1.0,1.0,0.0,0.0,2424.0
50 | 1.0,17.0,120.0,1.0,0.0,0.0,0.0,0.0,2438.0
51 | 1.0,26.0,154.0,1.0,0.0,1.0,1.0,0.0,2442.0
52 | 1.0,20.0,105.0,1.0,0.0,0.0,0.0,0.0,2450.0
53 | 1.0,26.0,168.0,0.0,1.0,0.0,0.0,0.0,2466.0
54 | 1.0,14.0,101.0,1.0,1.0,1.0,0.0,0.0,2466.0
55 | 1.0,28.0,95.0,0.0,1.0,0.0,0.0,0.0,2466.0
56 | 1.0,14.0,100.0,1.0,0.0,0.0,0.0,0.0,2495.0
57 | 1.0,23.0,94.0,1.0,1.0,0.0,0.0,0.0,2495.0
58 | 1.0,17.0,142.0,1.0,0.0,0.0,1.0,0.0,2495.0
59 | 1.0,21.0,130.0,0.0,1.0,0.0,1.0,0.0,2495.0
60 | 0.0,19.0,182.0,1.0,0.0,0.0,0.0,1.0,2523.0
61 | 0.0,33.0,155.0,1.0,0.0,0.0,0.0,0.0,2551.0
62 | 0.0,20.0,105.0,0.0,1.0,0.0,0.0,0.0,2557.0
63 | 0.0,21.0,108.0,0.0,1.0,0.0,0.0,1.0,2594.0
64 | 0.0,18.0,107.0,0.0,1.0,0.0,0.0,1.0,2600.0
65 | 0.0,21.0,124.0,1.0,0.0,0.0,0.0,0.0,2622.0
66 | 0.0,22.0,118.0,0.0,0.0,0.0,0.0,0.0,2637.0
67 | 0.0,17.0,103.0,1.0,0.0,0.0,0.0,0.0,2637.0
68 | 0.0,29.0,123.0,0.0,1.0,0.0,0.0,0.0,2663.0
69 | 0.0,26.0,113.0,0.0,1.0,0.0,0.0,0.0,2665.0
70 | 0.0,19.0,95.0,1.0,0.0,0.0,0.0,0.0,2722.0
71 | 0.0,19.0,150.0,1.0,0.0,0.0,0.0,0.0,2733.0
72 | 0.0,22.0,95.0,1.0,0.0,0.0,1.0,0.0,2750.0
73 | 0.0,30.0,107.0,1.0,0.0,1.0,0.0,1.0,2750.0
74 | 0.0,18.0,100.0,0.0,1.0,0.0,0.0,0.0,2769.0
75 | 0.0,18.0,100.0,1.0,1.0,0.0,0.0,0.0,2769.0
76 | 0.0,15.0,98.0,1.0,0.0,0.0,0.0,0.0,2778.0
77 | 0.0,25.0,118.0,0.0,1.0,0.0,0.0,0.0,2782.0
78 | 0.0,20.0,120.0,1.0,0.0,0.0,0.0,1.0,2807.0
79 | 0.0,28.0,120.0,0.0,1.0,0.0,0.0,0.0,2821.0
80 | 0.0,32.0,121.0,1.0,0.0,0.0,0.0,0.0,2835.0
81 | 0.0,31.0,100.0,0.0,0.0,0.0,0.0,1.0,2835.0
82 | 0.0,36.0,202.0,0.0,0.0,0.0,0.0,0.0,2836.0
83 | 0.0,28.0,120.0,1.0,0.0,0.0,0.0,0.0,2863.0
84 | 0.0,25.0,120.0,1.0,0.0,0.0,0.0,1.0,2877.0
85 | 0.0,28.0,167.0,0.0,0.0,0.0,0.0,0.0,2877.0
86 | 0.0,17.0,122.0,0.0,1.0,0.0,0.0,0.0,2906.0
87 | 0.0,29.0,150.0,0.0,0.0,0.0,0.0,0.0,2920.0
88 | 0.0,26.0,168.0,1.0,1.0,0.0,0.0,0.0,2920.0
89 | 0.0,17.0,113.0,1.0,0.0,0.0,0.0,0.0,2920.0
90 | 0.0,17.0,113.0,1.0,0.0,0.0,0.0,0.0,2920.0
91 | 0.0,24.0,90.0,0.0,1.0,1.0,0.0,0.0,2948.0
92 | 0.0,35.0,121.0,1.0,1.0,1.0,1.0,0.0,2948.0
93 | 0.0,25.0,155.0,0.0,1.0,1.0,0.0,0.0,2977.0
94 | 0.0,25.0,125.0,1.0,0.0,0.0,0.0,0.0,2977.0
95 | 0.0,29.0,140.0,0.0,1.0,0.0,0.0,0.0,2977.0
96 | 0.0,19.0,138.0,0.0,1.0,0.0,1.0,0.0,2977.0
97 | 0.0,27.0,124.0,0.0,1.0,0.0,0.0,0.0,2992.0
98 | 0.0,31.0,115.0,0.0,1.0,0.0,0.0,0.0,3005.0
99 | 0.0,33.0,109.0,0.0,1.0,0.0,0.0,0.0,3033.0
100 | 0.0,21.0,185.0,1.0,1.0,0.0,0.0,0.0,3042.0
101 | 0.0,19.0,189.0,0.0,0.0,0.0,0.0,0.0,3062.0
102 | 0.0,23.0,130.0,1.0,0.0,0.0,0.0,0.0,3062.0
103 | 0.0,21.0,160.0,0.0,0.0,0.0,0.0,0.0,3062.0
104 | 0.0,18.0,90.0,0.0,1.0,0.0,0.0,1.0,3076.0
105 | 0.0,18.0,90.0,0.0,1.0,0.0,0.0,1.0,3076.0
106 | 0.0,32.0,132.0,0.0,0.0,0.0,0.0,0.0,3080.0
107 | 0.0,19.0,132.0,1.0,0.0,0.0,0.0,0.0,3090.0
108 | 0.0,24.0,115.0,0.0,0.0,0.0,0.0,0.0,3090.0
109 | 0.0,22.0,85.0,1.0,1.0,0.0,0.0,0.0,3090.0
110 | 0.0,22.0,120.0,0.0,0.0,0.0,1.0,0.0,3100.0
111 | 0.0,23.0,128.0,1.0,0.0,0.0,0.0,0.0,3104.0
112 | 0.0,22.0,130.0,0.0,1.0,0.0,0.0,0.0,3132.0
113 | 0.0,30.0,95.0,0.0,1.0,0.0,0.0,0.0,3147.0
114 | 0.0,19.0,115.0,1.0,0.0,0.0,0.0,0.0,3175.0
115 | 0.0,16.0,110.0,1.0,0.0,0.0,0.0,0.0,3175.0
116 | 0.0,21.0,110.0,1.0,1.0,0.0,0.0,1.0,3203.0
117 | 0.0,30.0,153.0,1.0,0.0,0.0,0.0,0.0,3203.0
118 | 0.0,20.0,103.0,1.0,0.0,0.0,0.0,0.0,3203.0
119 | 0.0,17.0,119.0,1.0,0.0,0.0,0.0,0.0,3225.0
120 | 0.0,17.0,119.0,1.0,0.0,0.0,0.0,0.0,3225.0
121 | 0.0,23.0,119.0,1.0,0.0,0.0,0.0,0.0,3232.0
122 | 0.0,24.0,110.0,1.0,0.0,0.0,0.0,0.0,3232.0
123 | 0.0,28.0,140.0,0.0,0.0,0.0,0.0,0.0,3234.0
124 | 0.0,26.0,133.0,1.0,1.0,0.0,0.0,0.0,3260.0
125 | 0.0,20.0,169.0,1.0,0.0,1.0,0.0,1.0,3274.0
126 | 0.0,24.0,115.0,1.0,0.0,0.0,0.0,0.0,3274.0
127 | 0.0,28.0,250.0,1.0,1.0,0.0,0.0,0.0,3303.0
128 | 0.0,20.0,141.0,0.0,0.0,0.0,0.0,1.0,3317.0
129 | 0.0,22.0,158.0,1.0,0.0,1.0,0.0,0.0,3317.0
130 | 0.0,22.0,112.0,0.0,1.0,1.0,0.0,0.0,3317.0
131 | 0.0,31.0,150.0,1.0,1.0,0.0,0.0,0.0,3321.0
132 | 0.0,23.0,115.0,1.0,1.0,0.0,0.0,0.0,3331.0
133 | 0.0,16.0,112.0,1.0,0.0,0.0,0.0,0.0,3374.0
134 | 0.0,16.0,135.0,0.0,1.0,0.0,0.0,0.0,3374.0
135 | 0.0,18.0,229.0,1.0,0.0,0.0,0.0,0.0,3402.0
136 | 0.0,25.0,140.0,0.0,0.0,0.0,0.0,0.0,3416.0
137 | 0.0,32.0,134.0,0.0,1.0,1.0,0.0,0.0,3430.0
138 | 0.0,20.0,121.0,1.0,1.0,0.0,0.0,0.0,3444.0
139 | 0.0,23.0,190.0,0.0,0.0,0.0,0.0,0.0,3459.0
140 | 0.0,22.0,131.0,0.0,0.0,0.0,0.0,0.0,3460.0
141 | 0.0,32.0,170.0,0.0,0.0,0.0,0.0,0.0,3473.0
142 | 0.0,30.0,110.0,1.0,0.0,0.0,0.0,0.0,3475.0
143 | 0.0,20.0,127.0,1.0,0.0,0.0,0.0,0.0,3487.0
144 | 0.0,23.0,123.0,1.0,0.0,0.0,0.0,0.0,3544.0
145 | 0.0,17.0,120.0,1.0,1.0,0.0,0.0,0.0,3572.0
146 | 0.0,19.0,105.0,1.0,0.0,0.0,0.0,0.0,3572.0
147 | 0.0,23.0,130.0,0.0,0.0,0.0,0.0,0.0,3586.0
148 | 0.0,36.0,175.0,0.0,0.0,0.0,0.0,0.0,3600.0
149 | 0.0,22.0,125.0,0.0,0.0,0.0,0.0,0.0,3614.0
150 | 0.0,24.0,133.0,0.0,0.0,0.0,0.0,0.0,3614.0
151 | 0.0,21.0,134.0,1.0,0.0,0.0,0.0,0.0,3629.0
152 | 0.0,19.0,235.0,0.0,1.0,0.0,1.0,0.0,3629.0
153 | 0.0,25.0,200.0,0.0,0.0,1.0,0.0,1.0,3637.0
154 | 0.0,16.0,135.0,0.0,1.0,0.0,0.0,0.0,3643.0
155 | 0.0,29.0,135.0,0.0,0.0,0.0,0.0,0.0,3651.0
156 | 0.0,29.0,154.0,0.0,0.0,0.0,0.0,0.0,3651.0
157 | 0.0,19.0,147.0,0.0,1.0,0.0,0.0,0.0,3651.0
158 | 0.0,19.0,147.0,0.0,1.0,0.0,0.0,0.0,3651.0
159 | 0.0,30.0,137.0,0.0,0.0,0.0,0.0,0.0,3699.0
160 | 0.0,24.0,110.0,0.0,0.0,0.0,0.0,0.0,3728.0
161 | 0.0,19.0,184.0,0.0,1.0,0.0,1.0,0.0,3756.0
162 | 0.0,24.0,110.0,0.0,0.0,1.0,0.0,0.0,3770.0
163 | 0.0,23.0,110.0,0.0,0.0,0.0,0.0,0.0,3770.0
164 | 0.0,20.0,120.0,1.0,0.0,0.0,0.0,0.0,3770.0
165 | 0.0,25.0,141.0,0.0,0.0,0.0,1.0,0.0,3790.0
166 | 0.0,30.0,112.0,0.0,0.0,0.0,0.0,0.0,3799.0
167 | 0.0,22.0,169.0,0.0,0.0,0.0,0.0,0.0,3827.0
168 | 0.0,18.0,120.0,0.0,1.0,0.0,0.0,0.0,3856.0
169 | 0.0,16.0,170.0,1.0,0.0,0.0,0.0,0.0,3860.0
170 | 0.0,32.0,186.0,0.0,0.0,0.0,0.0,0.0,3860.0
171 | 0.0,18.0,120.0,1.0,0.0,0.0,0.0,0.0,3884.0
172 | 0.0,29.0,130.0,0.0,1.0,0.0,0.0,0.0,3884.0
173 | 0.0,33.0,117.0,0.0,0.0,0.0,0.0,1.0,3912.0
174 | 0.0,20.0,170.0,0.0,1.0,0.0,0.0,0.0,3940.0
175 | 0.0,28.0,134.0,1.0,0.0,0.0,0.0,0.0,3941.0
176 | 0.0,14.0,135.0,0.0,0.0,1.0,0.0,0.0,3941.0
177 | 0.0,28.0,130.0,1.0,0.0,0.0,0.0,0.0,3969.0
178 | 0.0,25.0,120.0,0.0,0.0,0.0,0.0,0.0,3983.0
179 | 0.0,16.0,135.0,1.0,0.0,0.0,0.0,0.0,3997.0
180 | 0.0,20.0,158.0,0.0,0.0,0.0,0.0,0.0,3997.0
181 | 0.0,26.0,160.0,0.0,0.0,0.0,0.0,0.0,4054.0
182 | 0.0,21.0,115.0,0.0,0.0,0.0,0.0,0.0,4054.0
183 | 0.0,22.0,129.0,0.0,0.0,0.0,0.0,0.0,4111.0
184 | 0.0,25.0,130.0,0.0,0.0,0.0,0.0,0.0,4153.0
185 | 0.0,31.0,120.0,0.0,0.0,0.0,0.0,0.0,4167.0
186 | 0.0,35.0,170.0,0.0,0.0,1.0,0.0,0.0,4174.0
187 | 0.0,19.0,120.0,0.0,1.0,0.0,1.0,0.0,4238.0
188 | 0.0,24.0,216.0,0.0,0.0,0.0,0.0,0.0,4593.0
189 | 0.0,45.0,123.0,0.0,0.0,1.0,0.0,0.0,4990.0
190 |
--------------------------------------------------------------------------------
/ch6/06_Using_Multiple_Layers/birth_weight.csv:
--------------------------------------------------------------------------------
1 | LOW,AGE,LWT,RACE,SMOKE,PTL,HT,UI,BWT
2 | 1.0,28.0,113.0,1.0,1.0,1.0,0.0,1.0,709.0
3 | 1.0,29.0,130.0,0.0,0.0,0.0,0.0,1.0,1021.0
4 | 1.0,34.0,187.0,1.0,1.0,0.0,1.0,0.0,1135.0
5 | 1.0,25.0,105.0,1.0,0.0,1.0,1.0,0.0,1330.0
6 | 1.0,25.0,85.0,1.0,0.0,0.0,0.0,1.0,1474.0
7 | 1.0,27.0,150.0,1.0,0.0,0.0,0.0,0.0,1588.0
8 | 1.0,23.0,97.0,1.0,0.0,0.0,0.0,1.0,1588.0
9 | 1.0,24.0,128.0,1.0,0.0,1.0,0.0,0.0,1701.0
10 | 1.0,24.0,132.0,1.0,0.0,0.0,1.0,0.0,1729.0
11 | 1.0,21.0,165.0,0.0,1.0,0.0,1.0,0.0,1790.0
12 | 1.0,32.0,105.0,1.0,1.0,0.0,0.0,0.0,1818.0
13 | 1.0,19.0,91.0,0.0,1.0,1.0,0.0,1.0,1885.0
14 | 1.0,25.0,115.0,1.0,0.0,0.0,0.0,0.0,1893.0
15 | 1.0,16.0,130.0,1.0,0.0,0.0,0.0,0.0,1899.0
16 | 1.0,25.0,92.0,0.0,1.0,0.0,0.0,0.0,1928.0
17 | 1.0,20.0,150.0,0.0,1.0,0.0,0.0,0.0,1928.0
18 | 1.0,21.0,190.0,1.0,0.0,0.0,0.0,1.0,1928.0
19 | 1.0,24.0,155.0,0.0,1.0,1.0,0.0,0.0,1936.0
20 | 1.0,21.0,103.0,1.0,0.0,0.0,0.0,0.0,1970.0
21 | 1.0,20.0,125.0,1.0,0.0,0.0,0.0,1.0,2055.0
22 | 1.0,25.0,89.0,1.0,0.0,1.0,0.0,0.0,2055.0
23 | 1.0,19.0,102.0,0.0,0.0,0.0,0.0,0.0,2082.0
24 | 1.0,19.0,112.0,0.0,1.0,0.0,0.0,1.0,2084.0
25 | 1.0,26.0,117.0,0.0,1.0,1.0,0.0,1.0,2084.0
26 | 1.0,24.0,138.0,0.0,0.0,0.0,0.0,0.0,2100.0
27 | 1.0,17.0,130.0,1.0,1.0,1.0,0.0,1.0,2125.0
28 | 1.0,20.0,120.0,1.0,1.0,0.0,0.0,0.0,2126.0
29 | 1.0,22.0,130.0,0.0,1.0,1.0,0.0,1.0,2187.0
30 | 1.0,27.0,130.0,1.0,0.0,0.0,0.0,1.0,2187.0
31 | 1.0,20.0,80.0,1.0,1.0,0.0,0.0,1.0,2211.0
32 | 1.0,17.0,110.0,0.0,1.0,0.0,0.0,0.0,2225.0
33 | 1.0,25.0,105.0,1.0,0.0,1.0,0.0,0.0,2240.0
34 | 1.0,20.0,109.0,1.0,0.0,0.0,0.0,0.0,2240.0
35 | 1.0,18.0,148.0,1.0,0.0,0.0,0.0,0.0,2282.0
36 | 1.0,18.0,110.0,1.0,1.0,1.0,0.0,0.0,2296.0
37 | 1.0,20.0,121.0,0.0,1.0,1.0,0.0,1.0,2296.0
38 | 1.0,21.0,100.0,1.0,0.0,1.0,0.0,0.0,2301.0
39 | 1.0,26.0,96.0,1.0,0.0,0.0,0.0,0.0,2325.0
40 | 1.0,31.0,102.0,0.0,1.0,1.0,0.0,0.0,2353.0
41 | 1.0,15.0,110.0,0.0,0.0,0.0,0.0,0.0,2353.0
42 | 1.0,23.0,187.0,1.0,1.0,0.0,0.0,0.0,2367.0
43 | 1.0,20.0,122.0,1.0,1.0,1.0,0.0,0.0,2381.0
44 | 1.0,24.0,105.0,1.0,1.0,0.0,0.0,0.0,2381.0
45 | 1.0,15.0,115.0,1.0,0.0,0.0,0.0,1.0,2381.0
46 | 1.0,23.0,120.0,1.0,0.0,0.0,0.0,0.0,2395.0
47 | 1.0,30.0,142.0,0.0,1.0,1.0,0.0,0.0,2410.0
48 | 1.0,22.0,130.0,0.0,1.0,0.0,0.0,0.0,2410.0
49 | 1.0,17.0,120.0,0.0,1.0,0.0,0.0,0.0,2414.0
50 | 1.0,23.0,110.0,0.0,1.0,1.0,0.0,0.0,2424.0
51 | 1.0,17.0,120.0,1.0,0.0,0.0,0.0,0.0,2438.0
52 | 1.0,26.0,154.0,1.0,0.0,1.0,1.0,0.0,2442.0
53 | 1.0,20.0,105.0,1.0,0.0,0.0,0.0,0.0,2450.0
54 | 1.0,26.0,168.0,0.0,1.0,0.0,0.0,0.0,2466.0
55 | 1.0,14.0,101.0,1.0,1.0,1.0,0.0,0.0,2466.0
56 | 1.0,28.0,95.0,0.0,1.0,0.0,0.0,0.0,2466.0
57 | 1.0,14.0,100.0,1.0,0.0,0.0,0.0,0.0,2495.0
58 | 1.0,23.0,94.0,1.0,1.0,0.0,0.0,0.0,2495.0
59 | 1.0,17.0,142.0,1.0,0.0,0.0,1.0,0.0,2495.0
60 | 1.0,21.0,130.0,0.0,1.0,0.0,1.0,0.0,2495.0
61 | 0.0,19.0,182.0,1.0,0.0,0.0,0.0,1.0,2523.0
62 | 0.0,33.0,155.0,1.0,0.0,0.0,0.0,0.0,2551.0
63 | 0.0,20.0,105.0,0.0,1.0,0.0,0.0,0.0,2557.0
64 | 0.0,21.0,108.0,0.0,1.0,0.0,0.0,1.0,2594.0
65 | 0.0,18.0,107.0,0.0,1.0,0.0,0.0,1.0,2600.0
66 | 0.0,21.0,124.0,1.0,0.0,0.0,0.0,0.0,2622.0
67 | 0.0,22.0,118.0,0.0,0.0,0.0,0.0,0.0,2637.0
68 | 0.0,17.0,103.0,1.0,0.0,0.0,0.0,0.0,2637.0
69 | 0.0,29.0,123.0,0.0,1.0,0.0,0.0,0.0,2663.0
70 | 0.0,26.0,113.0,0.0,1.0,0.0,0.0,0.0,2665.0
71 | 0.0,19.0,95.0,1.0,0.0,0.0,0.0,0.0,2722.0
72 | 0.0,19.0,150.0,1.0,0.0,0.0,0.0,0.0,2733.0
73 | 0.0,22.0,95.0,1.0,0.0,0.0,1.0,0.0,2750.0
74 | 0.0,30.0,107.0,1.0,0.0,1.0,0.0,1.0,2750.0
75 | 0.0,18.0,100.0,0.0,1.0,0.0,0.0,0.0,2769.0
76 | 0.0,18.0,100.0,1.0,1.0,0.0,0.0,0.0,2769.0
77 | 0.0,15.0,98.0,1.0,0.0,0.0,0.0,0.0,2778.0
78 | 0.0,25.0,118.0,0.0,1.0,0.0,0.0,0.0,2782.0
79 | 0.0,20.0,120.0,1.0,0.0,0.0,0.0,1.0,2807.0
80 | 0.0,28.0,120.0,0.0,1.0,0.0,0.0,0.0,2821.0
81 | 0.0,32.0,121.0,1.0,0.0,0.0,0.0,0.0,2835.0
82 | 0.0,31.0,100.0,0.0,0.0,0.0,0.0,1.0,2835.0
83 | 0.0,36.0,202.0,0.0,0.0,0.0,0.0,0.0,2836.0
84 | 0.0,28.0,120.0,1.0,0.0,0.0,0.0,0.0,2863.0
85 | 0.0,25.0,120.0,1.0,0.0,0.0,0.0,1.0,2877.0
86 | 0.0,28.0,167.0,0.0,0.0,0.0,0.0,0.0,2877.0
87 | 0.0,17.0,122.0,0.0,1.0,0.0,0.0,0.0,2906.0
88 | 0.0,29.0,150.0,0.0,0.0,0.0,0.0,0.0,2920.0
89 | 0.0,26.0,168.0,1.0,1.0,0.0,0.0,0.0,2920.0
90 | 0.0,17.0,113.0,1.0,0.0,0.0,0.0,0.0,2920.0
91 | 0.0,17.0,113.0,1.0,0.0,0.0,0.0,0.0,2920.0
92 | 0.0,24.0,90.0,0.0,1.0,1.0,0.0,0.0,2948.0
93 | 0.0,35.0,121.0,1.0,1.0,1.0,1.0,0.0,2948.0
94 | 0.0,25.0,155.0,0.0,1.0,1.0,0.0,0.0,2977.0
95 | 0.0,25.0,125.0,1.0,0.0,0.0,0.0,0.0,2977.0
96 | 0.0,29.0,140.0,0.0,1.0,0.0,0.0,0.0,2977.0
97 | 0.0,19.0,138.0,0.0,1.0,0.0,1.0,0.0,2977.0
98 | 0.0,27.0,124.0,0.0,1.0,0.0,0.0,0.0,2992.0
99 | 0.0,31.0,115.0,0.0,1.0,0.0,0.0,0.0,3005.0
100 | 0.0,33.0,109.0,0.0,1.0,0.0,0.0,0.0,3033.0
101 | 0.0,21.0,185.0,1.0,1.0,0.0,0.0,0.0,3042.0
102 | 0.0,19.0,189.0,0.0,0.0,0.0,0.0,0.0,3062.0
103 | 0.0,23.0,130.0,1.0,0.0,0.0,0.0,0.0,3062.0
104 | 0.0,21.0,160.0,0.0,0.0,0.0,0.0,0.0,3062.0
105 | 0.0,18.0,90.0,0.0,1.0,0.0,0.0,1.0,3076.0
106 | 0.0,18.0,90.0,0.0,1.0,0.0,0.0,1.0,3076.0
107 | 0.0,32.0,132.0,0.0,0.0,0.0,0.0,0.0,3080.0
108 | 0.0,19.0,132.0,1.0,0.0,0.0,0.0,0.0,3090.0
109 | 0.0,24.0,115.0,0.0,0.0,0.0,0.0,0.0,3090.0
110 | 0.0,22.0,85.0,1.0,1.0,0.0,0.0,0.0,3090.0
111 | 0.0,22.0,120.0,0.0,0.0,0.0,1.0,0.0,3100.0
112 | 0.0,23.0,128.0,1.0,0.0,0.0,0.0,0.0,3104.0
113 | 0.0,22.0,130.0,0.0,1.0,0.0,0.0,0.0,3132.0
114 | 0.0,30.0,95.0,0.0,1.0,0.0,0.0,0.0,3147.0
115 | 0.0,19.0,115.0,1.0,0.0,0.0,0.0,0.0,3175.0
116 | 0.0,16.0,110.0,1.0,0.0,0.0,0.0,0.0,3175.0
117 | 0.0,21.0,110.0,1.0,1.0,0.0,0.0,1.0,3203.0
118 | 0.0,30.0,153.0,1.0,0.0,0.0,0.0,0.0,3203.0
119 | 0.0,20.0,103.0,1.0,0.0,0.0,0.0,0.0,3203.0
120 | 0.0,17.0,119.0,1.0,0.0,0.0,0.0,0.0,3225.0
121 | 0.0,17.0,119.0,1.0,0.0,0.0,0.0,0.0,3225.0
122 | 0.0,23.0,119.0,1.0,0.0,0.0,0.0,0.0,3232.0
123 | 0.0,24.0,110.0,1.0,0.0,0.0,0.0,0.0,3232.0
124 | 0.0,28.0,140.0,0.0,0.0,0.0,0.0,0.0,3234.0
125 | 0.0,26.0,133.0,1.0,1.0,0.0,0.0,0.0,3260.0
126 | 0.0,20.0,169.0,1.0,0.0,1.0,0.0,1.0,3274.0
127 | 0.0,24.0,115.0,1.0,0.0,0.0,0.0,0.0,3274.0
128 | 0.0,28.0,250.0,1.0,1.0,0.0,0.0,0.0,3303.0
129 | 0.0,20.0,141.0,0.0,0.0,0.0,0.0,1.0,3317.0
130 | 0.0,22.0,158.0,1.0,0.0,1.0,0.0,0.0,3317.0
131 | 0.0,22.0,112.0,0.0,1.0,1.0,0.0,0.0,3317.0
132 | 0.0,31.0,150.0,1.0,1.0,0.0,0.0,0.0,3321.0
133 | 0.0,23.0,115.0,1.0,1.0,0.0,0.0,0.0,3331.0
134 | 0.0,16.0,112.0,1.0,0.0,0.0,0.0,0.0,3374.0
135 | 0.0,16.0,135.0,0.0,1.0,0.0,0.0,0.0,3374.0
136 | 0.0,18.0,229.0,1.0,0.0,0.0,0.0,0.0,3402.0
137 | 0.0,25.0,140.0,0.0,0.0,0.0,0.0,0.0,3416.0
138 | 0.0,32.0,134.0,0.0,1.0,1.0,0.0,0.0,3430.0
139 | 0.0,20.0,121.0,1.0,1.0,0.0,0.0,0.0,3444.0
140 | 0.0,23.0,190.0,0.0,0.0,0.0,0.0,0.0,3459.0
141 | 0.0,22.0,131.0,0.0,0.0,0.0,0.0,0.0,3460.0
142 | 0.0,32.0,170.0,0.0,0.0,0.0,0.0,0.0,3473.0
143 | 0.0,30.0,110.0,1.0,0.0,0.0,0.0,0.0,3475.0
144 | 0.0,20.0,127.0,1.0,0.0,0.0,0.0,0.0,3487.0
145 | 0.0,23.0,123.0,1.0,0.0,0.0,0.0,0.0,3544.0
146 | 0.0,17.0,120.0,1.0,1.0,0.0,0.0,0.0,3572.0
147 | 0.0,19.0,105.0,1.0,0.0,0.0,0.0,0.0,3572.0
148 | 0.0,23.0,130.0,0.0,0.0,0.0,0.0,0.0,3586.0
149 | 0.0,36.0,175.0,0.0,0.0,0.0,0.0,0.0,3600.0
150 | 0.0,22.0,125.0,0.0,0.0,0.0,0.0,0.0,3614.0
151 | 0.0,24.0,133.0,0.0,0.0,0.0,0.0,0.0,3614.0
152 | 0.0,21.0,134.0,1.0,0.0,0.0,0.0,0.0,3629.0
153 | 0.0,19.0,235.0,0.0,1.0,0.0,1.0,0.0,3629.0
154 | 0.0,25.0,200.0,0.0,0.0,1.0,0.0,1.0,3637.0
155 | 0.0,16.0,135.0,0.0,1.0,0.0,0.0,0.0,3643.0
156 | 0.0,29.0,135.0,0.0,0.0,0.0,0.0,0.0,3651.0
157 | 0.0,29.0,154.0,0.0,0.0,0.0,0.0,0.0,3651.0
158 | 0.0,19.0,147.0,0.0,1.0,0.0,0.0,0.0,3651.0
159 | 0.0,19.0,147.0,0.0,1.0,0.0,0.0,0.0,3651.0
160 | 0.0,30.0,137.0,0.0,0.0,0.0,0.0,0.0,3699.0
161 | 0.0,24.0,110.0,0.0,0.0,0.0,0.0,0.0,3728.0
162 | 0.0,19.0,184.0,0.0,1.0,0.0,1.0,0.0,3756.0
163 | 0.0,24.0,110.0,0.0,0.0,1.0,0.0,0.0,3770.0
164 | 0.0,23.0,110.0,0.0,0.0,0.0,0.0,0.0,3770.0
165 | 0.0,20.0,120.0,1.0,0.0,0.0,0.0,0.0,3770.0
166 | 0.0,25.0,141.0,0.0,0.0,0.0,1.0,0.0,3790.0
167 | 0.0,30.0,112.0,0.0,0.0,0.0,0.0,0.0,3799.0
168 | 0.0,22.0,169.0,0.0,0.0,0.0,0.0,0.0,3827.0
169 | 0.0,18.0,120.0,0.0,1.0,0.0,0.0,0.0,3856.0
170 | 0.0,16.0,170.0,1.0,0.0,0.0,0.0,0.0,3860.0
171 | 0.0,32.0,186.0,0.0,0.0,0.0,0.0,0.0,3860.0
172 | 0.0,18.0,120.0,1.0,0.0,0.0,0.0,0.0,3884.0
173 | 0.0,29.0,130.0,0.0,1.0,0.0,0.0,0.0,3884.0
174 | 0.0,33.0,117.0,0.0,0.0,0.0,0.0,1.0,3912.0
175 | 0.0,20.0,170.0,0.0,1.0,0.0,0.0,0.0,3940.0
176 | 0.0,28.0,134.0,1.0,0.0,0.0,0.0,0.0,3941.0
177 | 0.0,14.0,135.0,0.0,0.0,1.0,0.0,0.0,3941.0
178 | 0.0,28.0,130.0,1.0,0.0,0.0,0.0,0.0,3969.0
179 | 0.0,25.0,120.0,0.0,0.0,0.0,0.0,0.0,3983.0
180 | 0.0,16.0,135.0,1.0,0.0,0.0,0.0,0.0,3997.0
181 | 0.0,20.0,158.0,0.0,0.0,0.0,0.0,0.0,3997.0
182 | 0.0,26.0,160.0,0.0,0.0,0.0,0.0,0.0,4054.0
183 | 0.0,21.0,115.0,0.0,0.0,0.0,0.0,0.0,4054.0
184 | 0.0,22.0,129.0,0.0,0.0,0.0,0.0,0.0,4111.0
185 | 0.0,25.0,130.0,0.0,0.0,0.0,0.0,0.0,4153.0
186 | 0.0,31.0,120.0,0.0,0.0,0.0,0.0,0.0,4167.0
187 | 0.0,35.0,170.0,0.0,0.0,1.0,0.0,0.0,4174.0
188 | 0.0,19.0,120.0,0.0,1.0,0.0,1.0,0.0,4238.0
189 | 0.0,24.0,216.0,0.0,0.0,0.0,0.0,0.0,4593.0
190 | 0.0,45.0,123.0,0.0,0.0,1.0,0.0,0.0,4990.0
191 |
--------------------------------------------------------------------------------
/ch3/04-keras-subclassing_api.ipynb:
--------------------------------------------------------------------------------
1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"accelerator":"GPU","colab":{"name":"04-keras-subclassing_api.ipynb","provenance":[],"collapsed_sections":[]},"kernelspec":{"display_name":"tf2","language":"python","name":"tf2"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.7.7"}},"cells":[{"cell_type":"markdown","metadata":{"id":"QSTTcZehB1Vo"},"source":["# Using Keras Subclassing API\n","-----------------------------"]},{"cell_type":"code","metadata":{"id":"jXAUhAri0ZO4","executionInfo":{"status":"ok","timestamp":1605426267685,"user_tz":-60,"elapsed":1940,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}}},"source":["import tensorflow as tf\n","from tensorflow import keras"],"execution_count":1,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"5rxLiuLp00SO"},"source":["Load MNIST dataset"]},{"cell_type":"code","metadata":{"id":"Er5Lw3LY0tbS","executionInfo":{"status":"ok","timestamp":1605426267897,"user_tz":-60,"elapsed":1131,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}}},"source":["mnist = tf.keras.datasets.mnist\n","(X_mnist_train, y_mnist_train), (X_mnist_test, y_mnist_test) = mnist.load_data()"],"execution_count":2,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"RDzS5CoZ01K0"},"source":["Normalize the grayscale image"]},{"cell_type":"code","metadata":{"id":"9fDHuK9n0tjL","executionInfo":{"status":"ok","timestamp":1605426268588,"user_tz":-60,"elapsed":756,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}}},"source":["def normalize_grayscale(image_data):\n"," \"\"\"\n"," Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]\n"," :param image_data: The image data to be normalized\n"," :return: Normalized image data\n"," \"\"\"\n"," a = 0.1\n"," b = 0.9\n"," image_data_min = image_data.min()\n"," image_data_max = image_data.max()\n"," return a + ((image_data - image_data_min)*(b-a))/(image_data_max - image_data_min)\n","\n","train_mnist_features = normalize_grayscale(X_mnist_train)\n","test_mnist_features = normalize_grayscale(X_mnist_test)"],"execution_count":3,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"7NVoyWPvElRC"},"source":["### 1 - Create a Custom Layer"]},{"cell_type":"code","metadata":{"id":"7EGGaXtXEhru","executionInfo":{"status":"ok","timestamp":1605425959944,"user_tz":-60,"elapsed":2798,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}}},"source":["class MyCustomDense(tf.keras.layers.Layer):\n"," # Initialize this class with the number of units\n"," def __init__(self, units):\n"," super(MyCustomDense, self).__init__()\n"," self.units = units\n"," \n"," # Define the weights and the bias\n"," def build(self, input_shape):\n"," self.w = self.add_weight(shape=(input_shape[-1], self.units),\n"," initializer='random_normal',\n"," trainable=True)\n"," self.b = self.add_weight(shape=(self.units,),\n"," initializer='random_normal',\n"," trainable=True)\n"," \n"," # Applying this layer 
transformation to the input tensor\n"," def call(self, inputs):\n"," return tf.matmul(inputs, self.w) + self.b\n"," \n"," # Function to retrieve the configuration\n"," def get_config(self):\n"," return {'units': self.units}"],"execution_count":4,"outputs":[]},{"cell_type":"code","metadata":{"id":"0uo5H7JAWG5L","executionInfo":{"status":"ok","timestamp":1605425966698,"user_tz":-60,"elapsed":9545,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"01e66be1-a634-4500-ee9f-aa0469dec49c","colab":{"base_uri":"https://localhost:8080/"}},"source":["x = tf.ones((2,2))\n","my_custom_layer = MyCustomDense(4)\n","y = my_custom_layer(x)\n","print(y)"],"execution_count":5,"outputs":[{"output_type":"stream","text":["tf.Tensor(\n","[[-0.09472567 0.11487222 -0.14412683 0.03258352]\n"," [-0.09472567 0.11487222 -0.14412683 0.03258352]], shape=(2, 4), dtype=float32)\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"J0xaa5TBVLMH","executionInfo":{"status":"ok","timestamp":1605425966699,"user_tz":-60,"elapsed":9545,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}}},"source":["# Create an input layer\n","inputs = keras.Input((12,4))\n","# Add an instance of MyCustomeDense layer\n","outputs = MyCustomDense(2)(inputs)\n","\n","# Create a model\n","model = keras.Model(inputs, outputs)\n","\n","# Get the model config\n","config = model.get_config()\n","\n","# Reload the model from the config\n","new_model = keras.Model.from_config(config, \n"," custom_objects={'MyCustomDense': MyCustomDense})"],"execution_count":6,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"e_owBbnGEpQr"},"source":["### 2 - Create a Custom Model"]},{"cell_type":"code","metadata":{"id":"MT2yyYiFEpba","executionInfo":{"status":"ok","timestamp":1605426275139,"user_tz":-60,"elapsed":529,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}}},"source":["class MyMNISTModel(tf.keras.Model):\n"," def __init__(self, num_classes):\n"," super(MyMNISTModel, self).__init__(name='my_mnist_model')\n"," self.num_classes = num_classes\n"," # Defining the layers\n"," self.flatten_1 = tf.keras.layers.Flatten()\n"," self.dropout = tf.keras.layers.Dropout(0.1)\n"," self.dense_1 = tf.keras.layers.Dense(50, activation='relu')\n"," self.dense_2 = tf.keras.layers.Dense(10, activation='softmax')\n","\n"," # We define our forward pass using layers created in the init method\n"," def call(self, inputs, training=False):\n"," x = self.flatten_1(inputs)\n"," x = self.dense_1(x)\n"," # Apply dropout only during the training phase\n"," if training:\n"," x = self.dropout(x, training=training)\n"," return self.dense_2(x)\n"," "],"execution_count":4,"outputs":[]},{"cell_type":"code","metadata":{"id":"x152BF61UBM3","executionInfo":{"status":"ok","timestamp":1605426277036,"user_tz":-60,"elapsed":1381,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}}},"source":["my_mnist_model = 
MyMNISTModel(10)"],"execution_count":5,"outputs":[]},{"cell_type":"code","metadata":{"id":"HMNeRbPkJAim","executionInfo":{"status":"ok","timestamp":1605426315146,"user_tz":-60,"elapsed":38471,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"9edd0102-b5d2-45a9-a9e4-fe5960756e84","colab":{"base_uri":"https://localhost:8080/"}},"source":["# Compile\n","my_mnist_model.compile(optimizer='sgd',\n"," loss='sparse_categorical_crossentropy',\n"," metrics=['accuracy'])\n","\n","# Train\n","my_mnist_model.fit(train_mnist_features, y_mnist_train, \n"," validation_data=(test_mnist_features, y_mnist_test), \n"," epochs=10)"],"execution_count":6,"outputs":[{"output_type":"stream","text":["Epoch 1/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.8260 - accuracy: 0.7817 - val_loss: 0.4128 - val_accuracy: 0.8886\n","Epoch 2/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.4119 - accuracy: 0.8836 - val_loss: 0.3378 - val_accuracy: 0.9056\n","Epoch 3/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.3625 - accuracy: 0.8950 - val_loss: 0.3059 - val_accuracy: 0.9140\n","Epoch 4/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.3358 - accuracy: 0.9036 - val_loss: 0.2870 - val_accuracy: 0.9191\n","Epoch 5/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.3167 - accuracy: 0.9089 - val_loss: 0.2705 - val_accuracy: 0.9251\n","Epoch 6/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.3003 - accuracy: 0.9145 - val_loss: 0.2595 - val_accuracy: 0.9261\n","Epoch 7/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.2862 - accuracy: 0.9180 - val_loss: 0.2489 - val_accuracy: 0.9289\n","Epoch 8/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.2741 - accuracy: 0.9211 - val_loss: 0.2357 - val_accuracy: 0.9341\n","Epoch 9/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.2616 - accuracy: 0.9253 - val_loss: 0.2245 - val_accuracy: 0.9358\n","Epoch 10/10\n","1875/1875 [==============================] - 4s 2ms/step - loss: 0.2513 - accuracy: 0.9279 - val_loss: 0.2158 - val_accuracy: 0.9386\n"],"name":"stdout"},{"output_type":"execute_result","data":{"text/plain":[""]},"metadata":{"tags":[]},"execution_count":6}]},{"cell_type":"code","metadata":{"id":"axvHIjoZs92_","executionInfo":{"status":"ok","timestamp":1605426004699,"user_tz":-60,"elapsed":47535,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}}},"source":[""],"execution_count":9,"outputs":[]}]}
--------------------------------------------------------------------------------
/ch12/02_Hyperparameter_tuning_with_HParams/02_Hyperparameter_tuning_with_HParams.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "CfjQHIfmapOJ"
8 | },
9 | "source": [
10 | "# Manage Hyperparameter Tuning with TensorBoard's HParams \n",
11 | "-----------------------------\n",
12 | "\n",
13 | "Tuning hyperparameters in Machine Learning project could be a real pain. The process is iterative and can take a long time to test all the hyperparameters combination.\n",
14 | "But fortunately, HParams, a TensorBoard plugin come to the rescue. It allows to test and find the better combination of hyperparameters.\n",
15 | "\n",
16 | "We start by loading the necessary libraries"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": null,
22 | "metadata": {
23 | "colab": {},
24 | "colab_type": "code",
25 | "executionInfo": {
26 | "elapsed": 2033,
27 | "status": "ok",
28 | "timestamp": 1601103568199,
29 | "user": {
30 | "displayName": "Alexia Audevart",
31 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
32 | "userId": "09723170522858788865"
33 | },
34 | "user_tz": -120
35 | },
36 | "id": "j-RN2EtcapOK"
37 | },
38 | "outputs": [],
39 | "source": [
40 | "import tensorflow as tf\n",
41 | "from tensorboard.plugins.hparams import api as hp\n",
42 | "import numpy as np\n",
43 | "import datetime"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {
49 | "colab_type": "text",
50 | "id": "Hcxk-DWaapON"
51 | },
52 | "source": [
53 | "## Load and Prepare MNIST dataset\n"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "metadata": {
60 | "colab": {
61 | "base_uri": "https://localhost:8080/",
62 | "height": 53
63 | },
64 | "colab_type": "code",
65 | "executionInfo": {
66 | "elapsed": 2836,
67 | "status": "ok",
68 | "timestamp": 1601103569010,
69 | "user": {
70 | "displayName": "Alexia Audevart",
71 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
72 | "userId": "09723170522858788865"
73 | },
74 | "user_tz": -120
75 | },
76 | "id": "xNoO10_1apOO",
77 | "outputId": "d9f2131d-a37c-40c3-c11a-13864cc83c91"
78 | },
79 | "outputs": [],
80 | "source": [
81 | "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
82 | "\n",
83 | "# Normalize\n",
84 | "x_train = x_train / 255\n",
85 | "x_test = x_test/ 255"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {
91 | "colab_type": "text",
92 | "id": "0sAZ6on-apOQ"
93 | },
94 | "source": [
95 | "## Hyperparameters"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {
102 | "colab": {},
103 | "colab_type": "code",
104 | "executionInfo": {
105 | "elapsed": 2829,
106 | "status": "ok",
107 | "timestamp": 1601103569012,
108 | "user": {
109 | "displayName": "Alexia Audevart",
110 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
111 | "userId": "09723170522858788865"
112 | },
113 | "user_tz": -120
114 | },
115 | "id": "r3ZLNn7-apOR"
116 | },
117 | "outputs": [],
118 | "source": [
119 | "HP_ARCHITECTURE_NN = hp.HParam('archi_nn', hp.Discrete(['128,64','256,128']))\n",
120 | "HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.0, 0.1))\n",
121 | "HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd']))"
122 | ]
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "metadata": {
127 | "colab_type": "text",
128 | "id": "2Xe_Guq2apOT"
129 | },
130 | "source": [
131 | "## Build the model\n",
132 | "We will use a simple sequential model on the `MNIST` dataset and we will configure the HParams callback."
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "metadata": {
139 | "colab": {},
140 | "colab_type": "code",
141 | "executionInfo": {
142 | "elapsed": 2824,
143 | "status": "ok",
144 | "timestamp": 1601103569013,
145 | "user": {
146 | "displayName": "Alexia Audevart",
147 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
148 | "userId": "09723170522858788865"
149 | },
150 | "user_tz": -120
151 | },
152 | "id": "ecKMXKg8apOT"
153 | },
154 | "outputs": [],
155 | "source": [
156 | "def train_model(hparams, experiment_run_log_dir):\n",
157 | " \n",
158 | " nb_units = list(map(int, hparams[HP_ARCHITECTURE_NN].split(\",\")))\n",
159 | " \n",
160 | " model = tf.keras.models.Sequential()\n",
161 | " model.add(tf.keras.layers.Flatten(name=\"FLATTEN\"))\n",
162 | " model.add(tf.keras.layers.Dense(units=nb_units[0] , activation=\"relu\", name=\"D1\"))\n",
163 | " model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], name=\"DROP_OUT\"))\n",
164 | " model.add(tf.keras.layers.Dense(units=nb_units[1] , activation=\"relu\", name=\"D2\"))\n",
165 | " model.add(tf.keras.layers.Dense(units=10, activation=\"softmax\", name=\"OUTPUT\"))\n",
166 | " \n",
167 | " model.compile(\n",
168 | " optimizer=hparams[HP_OPTIMIZER], \n",
169 | " loss=\"sparse_categorical_crossentropy\",\n",
170 | " metrics=[\"accuracy\"]\n",
171 | " )\n",
172 | " \n",
173 | " tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=experiment_run_log_dir)\n",
174 | " hparams_callback = hp.KerasCallback(experiment_run_log_dir, hparams)\n",
175 | " \n",
176 | " model.fit(x=x_train, \n",
177 | " y=y_train, \n",
178 | " epochs=5,\n",
179 | " validation_data=(x_test, y_test),\n",
180 | " callbacks=[tensorboard_callback, hparams_callback]\n",
181 | " ) "
182 | ]
183 | },
184 | {
185 | "cell_type": "markdown",
186 | "metadata": {
187 | "colab_type": "text",
188 | "id": "JKSn-NWQapOV"
189 | },
190 | "source": [
191 | "Next, We will iterate on all the hyperparameters."
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": null,
197 | "metadata": {
198 | "colab": {
199 | "base_uri": "https://localhost:8080/",
200 | "height": 432
201 | },
202 | "colab_type": "code",
203 | "executionInfo": {
204 | "elapsed": 4384,
205 | "status": "error",
206 | "timestamp": 1601103609691,
207 | "user": {
208 | "displayName": "Alexia Audevart",
209 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
210 | "userId": "09723170522858788865"
211 | },
212 | "user_tz": -120
213 | },
214 | "id": "L5lR7jPXapOW",
215 | "outputId": "c96b24bf-3899-48df-d036-012017e85834"
216 | },
217 | "outputs": [],
218 | "source": [
219 | "for archi_nn in HP_ARCHITECTURE_NN.domain.values:\n",
220 | " for optimizer in HP_OPTIMIZER.domain.values:\n",
221 | " for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):\n",
222 | " hparams = {\n",
223 | " HP_ARCHITECTURE_NN : archi_nn, \n",
224 | " HP_OPTIMIZER: optimizer,\n",
225 | " HP_DROPOUT : dropout_rate\n",
226 | " }\n",
227 | " \n",
228 | " experiment_run_log_dir=\"logs/experiment-\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
229 | " \n",
230 | " train_model(hparams, experiment_run_log_dir)"
231 | ]
232 | },
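As an optional refinement, not present in the original notebook, the experiment configuration can also be declared once with `hp.hparams_config` so that the HParams dashboard knows which hyperparameters and metrics to display even before the first run finishes. The sketch below reuses the `HP_*` objects defined above and assumes the same `logs` directory.

```python
# Register the hyperparameters and metrics with the HParams dashboard up front
with tf.summary.create_file_writer("logs/hparam_tuning").as_default():
    hp.hparams_config(
        hparams=[HP_ARCHITECTURE_NN, HP_DROPOUT, HP_OPTIMIZER],
        metrics=[hp.Metric("accuracy", display_name="Accuracy")],
    )
```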
233 | {
234 | "cell_type": "markdown",
235 | "metadata": {
236 | "colab_type": "text",
237 | "id": "yWweU7k8apOZ"
238 | },
239 | "source": [
240 | "## Start TensorBoard\n",
241 | "\n",
242 | "We then start the Tensorboard application by running the command:\n",
243 | "\n",
244 | "`$ tensorboard --logdir=\"logs\"`\n",
245 | "\n",
246 | "Then we navigate our browser to the folling link:\n",
247 | "\n",
248 | "`http://127.0.0.0:6006`\n",
249 | "\n",
250 | "Note that we can specify a different port if needed by passing for example a `--port 6007` command (for running on port 6007.\n",
251 | "\n",
252 | "\n",
253 | "We can also start TensorBoard within the notebook through the `%tensorboard --logdir=\"logs\"` command line \n",
254 | "\n",
255 | "> Note that Tensorboard will be viewable _*as*_ your program is running."
256 | ]
257 | },
258 | {
259 | "cell_type": "markdown",
260 | "metadata": {
261 | "colab_type": "text",
262 | "id": "-L3bGe4zapOZ"
263 | },
264 | "source": [
265 | "## Visualize the result in the HParams dashboard\n",
266 | "We can visualize the results (hyperparameters and metrics) for each run on the table view.
\n",
267 | "Note that filters and sorts can be applied on the left pane if needed."
268 | ]
269 | },
270 | {
271 | "cell_type": "markdown",
272 | "metadata": {
273 | "colab_type": "text",
274 | "id": "mGwuN-lxapOa"
275 | },
276 | "source": [
277 | "Here the screenshot of the table view:\n",
278 | "\n",
279 | ""
280 | ]
281 | },
282 | {
283 | "cell_type": "markdown",
284 | "metadata": {
285 | "colab_type": "text",
286 | "id": "7g8pfPFCapOa"
287 | },
288 | "source": [
289 | "On the parallel coordinates view, each axis represents a hyperparameter or a metric and each run is represented by a line.
\n",
290 | "This visualization allows to identify quickly the better hyperparameters combination.\n",
291 | "Here we show how to visualize a vector of parameters with a histogram summary.\n",
292 | "\n",
293 | ""
294 | ]
295 | },
296 | {
297 | "cell_type": "code",
298 | "execution_count": null,
299 | "metadata": {
300 | "colab": {},
301 | "colab_type": "code",
302 | "executionInfo": {
303 | "elapsed": 38080,
304 | "status": "aborted",
305 | "timestamp": 1601103604284,
306 | "user": {
307 | "displayName": "Alexia Audevart",
308 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
309 | "userId": "09723170522858788865"
310 | },
311 | "user_tz": -120
312 | },
313 | "id": "_A9rCTlbapOb"
314 | },
315 | "outputs": [],
316 | "source": []
317 | }
318 | ],
319 | "metadata": {
320 | "accelerator": "GPU",
321 | "colab": {
322 | "name": "02_Hyperparameter_tuning_with_HParams.ipynb",
323 | "provenance": []
324 | },
325 | "kernelspec": {
326 | "display_name": "Python 3",
327 | "language": "python",
328 | "name": "python3"
329 | },
330 | "language_info": {
331 | "codemirror_mode": {
332 | "name": "ipython",
333 | "version": 3
334 | },
335 | "file_extension": ".py",
336 | "mimetype": "text/x-python",
337 | "name": "python",
338 | "nbconvert_exporter": "python",
339 | "pygments_lexer": "ipython3",
340 | "version": "3.7.4"
341 | }
342 | },
343 | "nbformat": 4,
344 | "nbformat_minor": 4
345 | }
346 |
--------------------------------------------------------------------------------
/ch6/05_Implementing_Different_Layers/05_implementing_different_layers.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Implementing Different Layers\n",
8 | "\n",
9 | "\n",
10 | "We will illustrate how to use different types of layers in TensorFlow\n",
11 | "\n",
12 | "The layers of interest are:\n",
13 | "1. Convolutional Layer\n",
14 | "2. Activation Layer \n",
15 | "3. Max-Pool Layer\n",
16 | "4. Fully Connected Layer\n",
17 | "\n",
18 | "We will generate two different data sets for this script, a 1-D data set (row of data) and a 2-D data set (similar to picture)"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "import tensorflow as tf\n",
28 | "import numpy as np"
29 | ]
30 | },
31 | {
32 | "cell_type": "markdown",
33 | "metadata": {},
34 | "source": [
35 | "```\n",
36 | "#---------------------------------------------------|\n",
37 | "#-------------------1D-data-------------------------|\n",
38 | "#---------------------------------------------------|\n",
39 | "```"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {
46 | "collapsed": false,
47 | "jupyter": {
48 | "outputs_hidden": false
49 | }
50 | },
51 | "outputs": [],
52 | "source": [
53 | "# Initialize Parameters\n",
54 | "data_size = 25\n",
55 | "conv_size = 5\n",
56 | "maxpool_size = 5\n",
57 | "stride_size = 1\n",
58 | "num_outputs = 5\n",
59 | "\n",
60 | "# Create Input\n",
61 | "x_input_1d = tf.keras.Input(dtype=tf.float32, shape=(data_size,1), name=\"input_layer\")\n",
62 | "\n",
63 | "#--------Convolution--------\n",
64 | "# Create convolution layer\n",
65 | "my_conv_output = tf.keras.layers.Conv1D(kernel_size=(conv_size),\n",
66 | " filters=data_size, \n",
67 | " strides=stride_size, \n",
68 | " padding=\"VALID\",\n",
69 | " name=\"convolution_layer\")(x_input_1d)\n",
70 | "\n",
71 | "#--------Activation--------\n",
72 | "# Create activation layer\n",
73 | "my_activation_output = tf.keras.layers.ReLU(name=\"activation_layer\")(my_conv_output)\n",
74 | "\n",
75 | "#--------Max Pool--------\n",
76 | "# Perform the max pooling with strides = 1\n",
77 | "# If we wanted to increase the stride on our data dimension, say by\n",
78 | "# a factor of '2', we put strides = 2\n",
79 | "# We will also need to specify the width of the max-window ('maxpool_size')\n",
80 | "my_maxpool_output = tf.keras.layers.MaxPool1D(strides=stride_size,\n",
81 | " pool_size=maxpool_size,\n",
82 | " padding='VALID',\n",
83 | " name=\"maxpool_layer\")(my_activation_output)\n",
84 | "\n",
85 | "#--------Fully Connected--------\n",
86 | "# Create dense layer\n",
87 | "my_full_output = tf.keras.layers.Dense(units=num_outputs,\n",
88 | " name=\"fully_connected_layer\")(my_maxpool_output)"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {},
95 | "outputs": [
96 | {
97 | "name": "stdout",
98 | "output_type": "stream",
99 | "text": [
100 | ">>>> 1D Data <<<<\n",
101 | "Model: \"model_1D\"\n",
102 | "_________________________________________________________________\n",
103 | "Layer (type) Output Shape Param # \n",
104 | "=================================================================\n",
105 | "input_layer (InputLayer) [(None, 25, 1)] 0 \n",
106 | "_________________________________________________________________\n",
107 | "convolution_layer (Conv1D) (None, 21, 25) 150 \n",
108 | "_________________________________________________________________\n",
109 | "activation_layer (ReLU) (None, 21, 25) 0 \n",
110 | "_________________________________________________________________\n",
111 | "maxpool_layer (MaxPooling1D) (None, 17, 25) 0 \n",
112 | "_________________________________________________________________\n",
113 | "fully_connected_layer (Dense (None, 17, 5) 130 \n",
114 | "=================================================================\n",
115 | "Total params: 280\n",
116 | "Trainable params: 280\n",
117 | "Non-trainable params: 0\n",
118 | "_________________________________________________________________\n",
119 | "\n",
120 | "== input_layer ==\n",
121 | "Input = array of length 25\n",
122 | "\n",
123 | "== convolution_layer ==\n",
124 | "Convolution w/ filter, length = 5, stride size = 1, results in an array of length 21\n",
125 | "\n",
126 | "== activation_layer ==\n",
127 | "Input = above array of length 21\n",
128 | "ReLU element wise returns an array of length 21\n",
129 | "\n",
130 | "== maxpool_layer ==\n",
131 | "Input = above array of length 21\n",
132 | "MaxPool, window length = 5, stride size = 1, results in the array of length 17\n",
133 | "\n",
134 | "== fully_connected_layer ==\n",
135 | "Input = above array of length 17\n",
136 | "Fully connected layer on all 4 rows with 17 outputs\n"
137 | ]
138 | }
139 | ],
140 | "source": [
141 | "print('>>>> 1D Data <<<<')\n",
142 | "\n",
143 | "model_1D = tf.keras.Model(inputs=x_input_1d, outputs=my_full_output, name=\"model_1D\")\n",
144 | "model_1D.summary()\n",
145 | "\n",
146 | "# Input\n",
147 | "print('\\n== input_layer ==')\n",
148 | "print('Input = array of length %d' % (x_input_1d.shape.as_list()[1]))\n",
149 | "\n",
150 | "# Convolution \n",
151 | "print('\\n== convolution_layer ==')\n",
152 | "print('Convolution w/ filter, length = %d, stride size = %d, results in an array of length %d' % \n",
153 | " (conv_size,stride_size,my_conv_output.shape.as_list()[1]))\n",
154 | "\n",
155 | "# Activation \n",
156 | "print('\\n== activation_layer ==')\n",
157 | "print('Input = above array of length %d' % (my_conv_output.shape.as_list()[1]))\n",
158 | "print('ReLU element wise returns an array of length %d' % (my_activation_output.shape.as_list()[1]))\n",
159 | "\n",
160 | "# Max Pool \n",
161 | "print('\\n== maxpool_layer ==')\n",
162 | "print('Input = above array of length %d' % (my_activation_output.shape.as_list()[1]))\n",
163 | "print('MaxPool, window length = %d, stride size = %d, results in the array of length %d' %\n",
164 | " (maxpool_size,stride_size,my_maxpool_output.shape.as_list()[1]))\n",
165 | "\n",
166 | "# Fully Connected \n",
167 | "print('\\n== fully_connected_layer ==')\n",
168 | "print('Input = above array of length %d' % (my_maxpool_output.shape.as_list()[1]))\n",
169 | "print('Fully connected layer on all 4 rows with %d outputs' % \n",
170 | " (my_full_output.shape.as_list()[1]))"
171 | ]
172 | },
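To make the 1-D shapes above concrete, a small addition (not in the original notebook) can push random NumPy data, used purely for illustration, through `model_1D` and check the output shape.

```python
import numpy as np

# Push one random 1-D example through the model and check the output shape
sample_1d = np.random.normal(size=(1, data_size, 1)).astype(np.float32)
output_1d = model_1D.predict(sample_1d)
print(output_1d.shape)  # expected: (1, 17, 5)
```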
173 | {
174 | "cell_type": "markdown",
175 | "metadata": {},
176 | "source": [
177 | "```\n",
178 | "#---------------------------------------------------|\n",
179 | "#-------------------2D-data-------------------------|\n",
180 | "#---------------------------------------------------|\n",
181 | "```"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {
188 | "collapsed": false,
189 | "jupyter": {
190 | "outputs_hidden": false
191 | }
192 | },
193 | "outputs": [],
194 | "source": [
195 | "# parameters for the run\n",
196 | "row_size = 10\n",
197 | "col_size = 10\n",
198 | "conv_size = 2\n",
199 | "conv_stride_size = 2\n",
200 | "maxpool_size = 2\n",
201 | "maxpool_stride_size = 1\n",
202 | "num_outputs = 5\n",
203 | "\n",
204 | "#--------Input--------\n",
205 | "x_input_2d = tf.keras.Input(dtype=tf.float32, shape=(row_size,col_size, 1), name=\"input_layer_2d\")\n",
206 | "\n",
207 | "#--------Convolution--------\n",
208 | "# Create convolution layer\n",
209 | "my_convolution_output_2d = tf.keras.layers.Conv2D(kernel_size=(conv_size),\n",
210 | " filters=conv_size,\n",
211 | " strides=conv_stride_size,\n",
212 | " padding=\"VALID\",\n",
213 | " name=\"convolution_layer_2d\")(x_input_2d)\n",
214 | "\n",
215 | "\n",
216 | "#--------Activation--------\n",
217 | "# Create activation layer\n",
218 | "my_activation_output_2d = tf.keras.layers.ReLU(name=\"activation_layer_2d\")(my_convolution_output_2d)\n",
219 | "\n",
220 | "#--------Max Pool--------\n",
221 | "# Perform the max pooling with strides = 1\n",
222 | "# If we wanted to increase the stride on our data dimension, say by\n",
223 | "# a factor of '2', we put strides = 2\n",
224 | "# We will also need to specify the width of the max-window ('maxpool_size')\n",
225 | "my_maxpool_output_2d = tf.keras.layers.MaxPool2D(strides=maxpool_stride_size,\n",
226 | " pool_size=maxpool_size,\n",
227 | " padding='VALID',\n",
228 | " name=\"maxpool_layer_2d\")(my_activation_output_2d)\n",
229 | "\n",
230 | "\n",
231 | "#--------Fully Connected--------\n",
232 | "# Create dense layer\n",
233 | "my_full_output_2d = tf.keras.layers.Dense(units=num_outputs,\n",
234 | " name=\"fully_connected_layer_2d\")(my_maxpool_output_2d)"
235 | ]
236 | },
237 | {
238 | "cell_type": "code",
239 | "execution_count": null,
240 | "metadata": {
241 | "collapsed": false,
242 | "jupyter": {
243 | "outputs_hidden": false
244 | }
245 | },
246 | "outputs": [
247 | {
248 | "name": "stdout",
249 | "output_type": "stream",
250 | "text": [
251 | ">>>> 2D Data <<<<\n",
252 | "Model: \"model_2D\"\n",
253 | "_________________________________________________________________\n",
254 | "Layer (type) Output Shape Param # \n",
255 | "=================================================================\n",
256 | "input_layer_2d (InputLayer) [(None, 10, 10, 1)] 0 \n",
257 | "_________________________________________________________________\n",
258 | "convolution_layer_2d (Conv2D (None, 5, 5, 2) 10 \n",
259 | "_________________________________________________________________\n",
260 | "activation_layer_2d (ReLU) (None, 5, 5, 2) 0 \n",
261 | "_________________________________________________________________\n",
262 | "maxpool_layer_2d (MaxPooling (None, 4, 4, 2) 0 \n",
263 | "_________________________________________________________________\n",
264 | "fully_connected_layer_2d (De (None, 4, 4, 5) 15 \n",
265 | "=================================================================\n",
266 | "Total params: 25\n",
267 | "Trainable params: 25\n",
268 | "Non-trainable params: 0\n",
269 | "_________________________________________________________________\n",
270 | "\n",
271 | "== input_layer ==\n",
272 | "Input = [10, 10] array\n",
273 | "\n",
274 | "== convolution_layer ==\n",
275 | "[2, 2] Convolution, stride size = [2, 2] , results in the [5, 5] array\n",
276 | "\n",
277 | "== activation_layer ==\n",
278 | "Input = the above [5, 5] array\n",
279 | "ReLU element wise returns the [5, 5] array\n",
280 | "\n",
281 | "== maxpool_layer ==\n",
282 | "Input = the above [5, 5] array\n",
283 | "MaxPool, stride size = [1, 1], results in [4, 4] array\n",
284 | "\n",
285 | "== fully_connected_layer ==\n",
286 | "Input = the above [4, 4] array\n",
287 | "Fully connected layer on all 4 rows results in 5 outputs\n"
288 | ]
289 | }
290 | ],
291 | "source": [
292 | "print('>>>> 2D Data <<<<')\n",
293 | "\n",
294 | "model_2D = tf.keras.Model(inputs=x_input_2d, outputs=my_full_output_2d, name=\"model_2D\")\n",
295 | "model_2D.summary()\n",
296 | "\n",
297 | "# Input \n",
298 | "print('\\n== input_layer ==')\n",
299 | "print('Input = %s array' % (x_input_2d.shape.as_list()[1:3]))\n",
300 | "\n",
301 | "# Convolution\n",
302 | "print('\\n== convolution_layer ==')\n",
303 | "print('%s Convolution, stride size = [%d, %d] , results in the %s array' % \n",
304 | " ([conv_size,conv_size],conv_stride_size,conv_stride_size,my_convolution_output_2d.shape.as_list()[1:3]))\n",
305 | "\n",
306 | "# Activation\n",
307 | "print('\\n== activation_layer ==')\n",
308 | "print('Input = the above %s array' % (my_convolution_output_2d.shape.as_list()[1:3]))\n",
309 | "print('ReLU element wise returns the %s array' % (my_activation_output_2d.shape.as_list()[1:3]))\n",
310 | "\n",
311 | "# Max Pool\n",
312 | "print('\\n== maxpool_layer ==')\n",
313 | "print('Input = the above %s array' % (my_activation_output_2d.shape.as_list()[1:3]))\n",
314 | "print('MaxPool, stride size = [%d, %d], results in %s array' % \n",
315 | " (maxpool_stride_size,maxpool_stride_size,my_maxpool_output_2d.shape.as_list()[1:3]))\n",
316 | "\n",
317 | "# Fully Connected\n",
318 | "print('\\n== fully_connected_layer ==')\n",
319 | "print('Input = the above %s array' % (my_maxpool_output_2d.shape.as_list()[1:3]))\n",
320 | "print('Fully connected layer on all %d rows results in %s outputs' % \n",
321 | " (my_maxpool_output_2d.shape.as_list()[1],my_full_output_2d.shape.as_list()[3]))"
322 | ]
323 | },
324 | {
325 | "cell_type": "code",
326 | "execution_count": null,
327 | "metadata": {},
328 | "outputs": [],
329 | "source": []
330 | }
331 | ],
332 | "metadata": {
333 | "anaconda-cloud": {},
334 | "kernelspec": {
335 | "display_name": "tf2",
336 | "language": "python",
337 | "name": "tf2"
338 | },
339 | "language_info": {
340 | "codemirror_mode": {
341 | "name": "ipython",
342 | "version": 3
343 | },
344 | "file_extension": ".py",
345 | "mimetype": "text/x-python",
346 | "name": "python",
347 | "nbconvert_exporter": "python",
348 | "pygments_lexer": "ipython3",
349 | "version": "3.7.7"
350 | }
351 | },
352 | "nbformat": 4,
353 | "nbformat_minor": 4
354 | }
355 |
--------------------------------------------------------------------------------
/ch12/04_Using_Multiple_Devices/04_using_multiple_devices.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "KPo4692KTNvc"
8 | },
9 | "source": [
10 | "# Using Multiple devices\n",
11 | "\n",
12 | "An easy way to have a GPU is to run the code in Google Colab and set GPU as the hardware accelerator in the notebook settings."
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {
19 | "colab": {},
20 | "colab_type": "code",
21 | "executionInfo": {
22 | "elapsed": 2358,
23 | "status": "ok",
24 | "timestamp": 1601104038895,
25 | "user": {
26 | "displayName": "Alexia Audevart",
27 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
28 | "userId": "09723170522858788865"
29 | },
30 | "user_tz": -120
31 | },
32 | "id": "qvtaetE0TNvd"
33 | },
34 | "outputs": [],
35 | "source": [
36 | "import tensorflow as tf"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {
43 | "colab": {
44 | "base_uri": "https://localhost:8080/",
45 | "height": 35
46 | },
47 | "colab_type": "code",
48 | "executionInfo": {
49 | "elapsed": 3655,
50 | "status": "ok",
51 | "timestamp": 1601104040202,
52 | "user": {
53 | "displayName": "Alexia Audevart",
54 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
55 | "userId": "09723170522858788865"
56 | },
57 | "user_tz": -120
58 | },
59 | "id": "WShhcn92TNvg",
60 | "outputId": "58885c06-46e8-4300-ec01-4628abf15578"
61 | },
62 | "outputs": [],
63 | "source": [
64 | "print(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {
70 | "colab_type": "text",
71 | "id": "dLx_JoDFTNvk"
72 | },
73 | "source": [
74 | "## Find out where placement occurs\n",
75 | "\n",
76 | "If a TensorFlow operation is implemented for CPU and GPU devices, the operation will be executed by default on a GPU device if a GPU is available."
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {
83 | "colab": {
84 | "base_uri": "https://localhost:8080/",
85 | "height": 71
86 | },
87 | "colab_type": "code",
88 | "executionInfo": {
89 | "elapsed": 3282,
90 | "status": "ok",
91 | "timestamp": 1601103854192,
92 | "user": {
93 | "displayName": "Alexia Audevart",
94 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
95 | "userId": "09723170522858788865"
96 | },
97 | "user_tz": -120
98 | },
99 | "id": "AehjcrN8TNvk",
100 | "outputId": "7c021a49-0b7e-47c8-a6cf-186d366ca7cf"
101 | },
102 | "outputs": [],
103 | "source": [
104 | "# To find out where placement occurs, set 'log_device_placement'\n",
105 | "tf.debugging.set_log_device_placement(True)\n",
106 | "\n",
107 | "a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n",
108 | "b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n",
109 | "c = tf.matmul(a, b)"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {
115 | "colab_type": "text",
116 | "id": "lrFpeKpCTNvm"
117 | },
118 | "source": [
119 | "We can also use the tensor `device` attribute that returns the name of the device on which this tensor will be assigned."
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": null,
125 | "metadata": {
126 | "colab": {
127 | "base_uri": "https://localhost:8080/",
128 | "height": 89
129 | },
130 | "colab_type": "code",
131 | "executionInfo": {
132 | "elapsed": 3276,
133 | "status": "ok",
134 | "timestamp": 1601103854193,
135 | "user": {
136 | "displayName": "Alexia Audevart",
137 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
138 | "userId": "09723170522858788865"
139 | },
140 | "user_tz": -120
141 | },
142 | "id": "IHRue5i9TNvn",
143 | "outputId": "9c7af1eb-ecfd-452f-fa59-e2f2af187acb"
144 | },
145 | "outputs": [],
146 | "source": [
147 | "a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n",
148 | "print(a.device)\n",
149 | "b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n",
150 | "print(b.device)"
151 | ]
152 | },
153 | {
154 | "cell_type": "markdown",
155 | "metadata": {
156 | "colab_type": "text",
157 | "id": "_MakEc47TNvp"
158 | },
159 | "source": [
160 | "## Create a device context\n",
161 | "\n",
162 | "We can select the device to use by creating a device context through the `with tf.device` function.\n",
163 | "Each operation executed in this context will use the selected device."
164 | ]
165 | },
166 | {
167 | "cell_type": "code",
168 | "execution_count": null,
169 | "metadata": {
170 | "colab": {
171 | "base_uri": "https://localhost:8080/",
172 | "height": 71
173 | },
174 | "colab_type": "code",
175 | "executionInfo": {
176 | "elapsed": 3269,
177 | "status": "ok",
178 | "timestamp": 1601103854194,
179 | "user": {
180 | "displayName": "Alexia Audevart",
181 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
182 | "userId": "09723170522858788865"
183 | },
184 | "user_tz": -120
185 | },
186 | "id": "4afJYTFiTNvq",
187 | "outputId": "3a786bc3-a1bc-479a-b9d2-6b2dfb418827"
188 | },
189 | "outputs": [],
190 | "source": [
191 | "tf.debugging.set_log_device_placement(True)\n",
192 | "with tf.device('/device:CPU:0'):\n",
193 | " a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n",
194 | " b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n",
195 | " c = tf.matmul(a, b)"
196 | ]
197 | },
198 | {
199 | "cell_type": "markdown",
200 | "metadata": {
201 | "colab_type": "text",
202 | "id": "v9OlP2tSTNvs"
203 | },
204 | "source": [
205 | "If we move the `matmul` operation out of the context. This operation will be executed on a GPU device if it's available."
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": null,
211 | "metadata": {
212 | "colab": {
213 | "base_uri": "https://localhost:8080/",
214 | "height": 71
215 | },
216 | "colab_type": "code",
217 | "executionInfo": {
218 | "elapsed": 3262,
219 | "status": "ok",
220 | "timestamp": 1601103854194,
221 | "user": {
222 | "displayName": "Alexia Audevart",
223 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
224 | "userId": "09723170522858788865"
225 | },
226 | "user_tz": -120
227 | },
228 | "id": "c0ClsH2mTNvs",
229 | "outputId": "f44dd974-1727-4179-9de2-ca1343c991dc"
230 | },
231 | "outputs": [],
232 | "source": [
233 | "tf.debugging.set_log_device_placement(True)\n",
234 | "with tf.device('/device:CPU:0'):\n",
235 | " a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n",
236 | " b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n",
237 | "c = tf.matmul(a, b)"
238 | ]
239 | },
240 | {
241 | "cell_type": "markdown",
242 | "metadata": {
243 | "colab_type": "text",
244 | "id": "mvQS_fphTNvv"
245 | },
246 | "source": [
247 | "## Limit the GPU memory allocation\n",
248 | "Careful with GPU memory allocation, TensorFlow never releases it. TensorFlow starts with almost\n",
249 | "all of the GPU memory allocated. \n",
250 | "\n",
251 | "We can slowly grow to that limit with the `tf.config.experimental.set_memory_growth` method option setting or another solution is to set the environmental variable `TF_FORCE_GPU_ALLOW_GROWTH` to `True`."
252 | ]
253 | },
254 | {
255 | "cell_type": "code",
256 | "execution_count": null,
257 | "metadata": {
258 | "colab": {
259 | "base_uri": "https://localhost:8080/",
260 | "height": 35
261 | },
262 | "colab_type": "code",
263 | "executionInfo": {
264 | "elapsed": 3255,
265 | "status": "ok",
266 | "timestamp": 1601103854195,
267 | "user": {
268 | "displayName": "Alexia Audevart",
269 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
270 | "userId": "09723170522858788865"
271 | },
272 | "user_tz": -120
273 | },
274 | "id": "dn8NCGARTNvv",
275 | "outputId": "824a285b-c914-4873-dce5-eb681e486991"
276 | },
277 | "outputs": [],
278 | "source": [
279 | "gpu_devices = tf.config.list_physical_devices('GPU')\n",
280 | "if gpu_devices:\n",
281 | " try:\n",
282 | " tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n",
283 | " except RuntimeError as e:\n",
284 | " # Memory growth cannot be modified after GPU has been initialized\n",
285 | " print(e)"
286 | ]
287 | },
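288 | {
289 | "cell_type": "markdown",
290 | "metadata": {},
291 | "source": [
292 | "As a minimal sketch of the environment-variable alternative mentioned above (an illustrative addition, not part of the original recipe), the variable can also be set from Python, provided this happens before TensorFlow initializes the GPU:"
293 | ]
294 | },
295 | {
296 | "cell_type": "code",
297 | "execution_count": null,
298 | "metadata": {},
299 | "outputs": [],
300 | "source": [
301 | "# Illustrative sketch: same effect as set_memory_growth, via the environment variable.\n",
302 | "# It only takes effect if it runs before the first GPU is initialized.\n",
303 | "import os\n",
304 | "os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'"
305 | ]
306 | },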
288 | {
289 | "cell_type": "markdown",
290 | "metadata": {
291 | "colab_type": "text",
292 | "id": "yKD30BsITNvx"
293 | },
294 | "source": [
295 | "We can also create a virtual GPU device with `tf.config.experimental.set_virtual_device_configuration` and set the maximum memory limit (in MB) to allocate on this virtual GPU."
296 | ]
297 | },
298 | {
299 | "cell_type": "code",
300 | "execution_count": null,
301 | "metadata": {
302 | "colab": {
303 | "base_uri": "https://localhost:8080/",
304 | "height": 35
305 | },
306 | "colab_type": "code",
307 | "executionInfo": {
308 | "elapsed": 3248,
309 | "status": "ok",
310 | "timestamp": 1601103854196,
311 | "user": {
312 | "displayName": "Alexia Audevart",
313 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
314 | "userId": "09723170522858788865"
315 | },
316 | "user_tz": -120
317 | },
318 | "id": "jKcwcnltTNvy",
319 | "outputId": "5d12ce1c-8719-4b30-dbc8-86857d16116a"
320 | },
321 | "outputs": [],
322 | "source": [
323 | "gpu_devices = tf.config.list_physical_devices('GPU')\n",
324 | "if gpu_devices:\n",
325 | " try:\n",
326 | " tf.config.experimental.set_virtual_device_configuration(gpu_devices[0],\n",
327 | " [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])\n",
328 | " except RuntimeError as e:\n",
329 | " # Memory growth cannot be modified after GPU has been initialized\n",
330 | " print(e)"
331 | ]
332 | },
333 | {
334 | "cell_type": "markdown",
335 | "metadata": {
336 | "colab_type": "text",
337 | "id": "O9QPhCzlTNv0"
338 | },
339 | "source": [
340 | "# Using multiple GPUs\n",
341 | "\n",
342 | "We can set placements on multiple devices.\n",
343 | "Here, assume we have three devices CPU:0, GPU:0, and GPU:1"
344 | ]
345 | },
346 | {
347 | "cell_type": "code",
348 | "execution_count": null,
349 | "metadata": {
350 | "colab": {
351 | "base_uri": "https://localhost:8080/",
352 | "height": 179
353 | },
354 | "colab_type": "code",
355 | "executionInfo": {
356 | "elapsed": 9305,
357 | "status": "ok",
358 | "timestamp": 1601104045861,
359 | "user": {
360 | "displayName": "Alexia Audevart",
361 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
362 | "userId": "09723170522858788865"
363 | },
364 | "user_tz": -120
365 | },
366 | "id": "SzdBn80sTNv1",
367 | "outputId": "eef64401-c89f-41e7-c2c4-6dc804b294bb"
368 | },
369 | "outputs": [],
370 | "source": [
371 | "# Create two virtual GPUs\n",
372 | "gpu_devices = tf.config.list_physical_devices('GPU')\n",
373 | "tf.debugging.set_log_device_placement(True)\n",
374 | "if gpu_devices:\n",
375 | " try:\n",
376 | " tf.config.experimental.set_virtual_device_configuration(gpu_devices[0],\n",
377 | " [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024),\n",
378 | " tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024) ])\n",
379 | " except RuntimeError as e:\n",
380 | " # Memory growth cannot be modified after GPU has been initialized\n",
381 | " print(e)\n",
382 | "\n",
383 | "print(\"Num GPUs Available: \", len(tf.config.list_logical_devices('GPU')))\n",
384 | "\n",
385 | "if tf.test.is_built_with_cuda():\n",
386 | " with tf.device('/cpu:0'):\n",
387 | " a = tf.constant([1.0, 3.0, 5.0], shape=[1, 3])\n",
388 | " b = tf.constant([2.0, 4.0, 6.0], shape=[3, 1])\n",
389 | " \n",
390 | " with tf.device('/gpu:0'):\n",
391 | " c = tf.matmul(a,b)\n",
392 | " c = tf.reshape(c, [-1])\n",
393 | " \n",
394 | " with tf.device('/gpu:1'):\n",
395 | " d = tf.matmul(b,a)\n",
396 | " flat_d = tf.reshape(d, [-1])\n",
397 | " \n",
398 | " combined = tf.multiply(c, flat_d)\n",
399 | " print(combined)\n"
400 | ]
401 | },
402 | {
403 | "cell_type": "code",
404 | "execution_count": null,
405 | "metadata": {
406 | "colab": {},
407 | "colab_type": "code",
408 | "executionInfo": {
409 | "elapsed": 3234,
410 | "status": "ok",
411 | "timestamp": 1601103854197,
412 | "user": {
413 | "displayName": "Alexia Audevart",
414 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
415 | "userId": "09723170522858788865"
416 | },
417 | "user_tz": -120
418 | },
419 | "id": "F45O8whtVKxZ"
420 | },
421 | "outputs": [],
422 | "source": []
423 | }
424 | ],
425 | "metadata": {
426 | "accelerator": "GPU",
427 | "colab": {
428 | "collapsed_sections": [],
429 | "name": "04_using_multiple_devices.ipynb",
430 | "provenance": []
431 | },
432 | "kernelspec": {
433 | "display_name": "Python 3",
434 | "language": "python",
435 | "name": "python3"
436 | },
437 | "language_info": {
438 | "codemirror_mode": {
439 | "name": "ipython",
440 | "version": 3
441 | },
442 | "file_extension": ".py",
443 | "mimetype": "text/x-python",
444 | "name": "python",
445 | "nbconvert_exporter": "python",
446 | "pygments_lexer": "ipython3",
447 | "version": "3.7.4"
448 | }
449 | },
450 | "nbformat": 4,
451 | "nbformat_minor": 4
452 | }
453 |
--------------------------------------------------------------------------------
/ch12/01_Visualizing_Computational_Graphs/01_using_tensorboard.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "cuBY4N9aauen"
8 | },
9 | "source": [
10 | "# Using Tensorboard\n",
11 | "-----------------------------\n",
12 | "\n",
13 | "Neural networks are often criticized because it's difficult to understand how they learn. \n",
14 | "TensorBoard is a powerful visualization tool that allows to open the black box and go inside the model.\n",
15 | "\n",
16 | "We'll illustrate the various uses of Tensorboard in this script.\n",
17 | " 1. Visualize scalars, distributions, images and histograms\n",
18 | " 2. Visualize TensorFlow model\n",
19 | "\n",
20 | "We start by loading the necessary libraries!"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "metadata": {
27 | "colab": {},
28 | "colab_type": "code",
29 | "executionInfo": {
30 | "elapsed": 2584,
31 | "status": "ok",
32 | "timestamp": 1601103010785,
33 | "user": {
34 | "displayName": "Alexia Audevart",
35 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
36 | "userId": "09723170522858788865"
37 | },
38 | "user_tz": -120
39 | },
40 | "id": "a4BR1z7Haueo"
41 | },
42 | "outputs": [],
43 | "source": [
44 | "import tensorflow as tf\n",
45 | "import numpy as np\n",
46 | "import datetime"
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "metadata": {
52 | "colab_type": "text",
53 | "id": "ss7rBM3Wauer"
54 | },
55 | "source": [
56 | "## Build MNIST Model\n",
57 | "To illustrate the various ways we can use TensorBoard, we will reimplement the `MNIST` model from `The Introductory CNN Model` in `Chapter 8, Convolutional Neural Network`."
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "metadata": {
64 | "colab": {
65 | "base_uri": "https://localhost:8080/",
66 | "height": 53
67 | },
68 | "colab_type": "code",
69 | "executionInfo": {
70 | "elapsed": 3997,
71 | "status": "ok",
72 | "timestamp": 1601103012205,
73 | "user": {
74 | "displayName": "Alexia Audevart",
75 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
76 | "userId": "09723170522858788865"
77 | },
78 | "user_tz": -120
79 | },
80 | "id": "PkHclYjwauer",
81 | "outputId": "b56961b8-fb68-4f81-b287-c4df32874901"
82 | },
83 | "outputs": [],
84 | "source": [
85 | "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
86 | "\n",
87 | "x_train = x_train.reshape(-1, 28, 28, 1)\n",
88 | "x_test = x_test.reshape(-1, 28, 28, 1)\n",
89 | "\n",
90 | "# Padding the images by 2 pixels since in the paper input images were 32x32\n",
91 | "x_train = np.pad(x_train, ((0,0),(2,2),(2,2),(0,0)), 'constant')\n",
92 | "x_test = np.pad(x_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')\n",
93 | "\n",
94 | "# Normalize\n",
95 | "x_train = x_train / 255\n",
96 | "x_test = x_test/ 255\n",
97 | "\n",
98 | "# Set model parameters\n",
99 | "image_width = x_train[0].shape[0]\n",
100 | "image_height = x_train[0].shape[1]\n",
101 | "num_channels = 1 # grayscale = 1 channel\n",
102 | "\n",
103 | "# Training and Test data variables\n",
104 | "batch_size = 100\n",
105 | "evaluation_size = 500\n",
106 | "generations = 300\n",
107 | "eval_every = 5\n",
108 | "\n",
109 | "# Set for reproducible results\n",
110 | "seed = 98\n",
111 | "np.random.seed(seed)\n",
112 | "tf.random.set_seed(seed)\n",
113 | "\n",
114 | "# Declare the model\n",
115 | "input_data = tf.keras.Input(dtype=tf.float32, shape=(image_width,image_height, num_channels), name=\"INPUT\")\n",
116 | "\n",
117 | "# First Conv-ReLU-MaxPool Layer\n",
118 | "conv1 = tf.keras.layers.Conv2D(filters=6,\n",
119 | " kernel_size=5,\n",
120 | " padding='VALID',\n",
121 | " activation=\"relu\",\n",
122 | " name=\"C1\")(input_data)\n",
123 | "\n",
124 | "max_pool1 = tf.keras.layers.MaxPool2D(pool_size=2,\n",
125 | " strides=2, \n",
126 | " padding='SAME',\n",
127 | " name=\"S1\")(conv1)\n",
128 | "\n",
129 | "# Second Conv-ReLU-MaxPool Layer\n",
130 | "conv2 = tf.keras.layers.Conv2D(filters=16,\n",
131 | " kernel_size=5,\n",
132 | " padding='VALID',\n",
133 | " strides=1,\n",
134 | " activation=\"relu\",\n",
135 | " name=\"C3\")(max_pool1)\n",
136 | "\n",
137 | "max_pool2 = tf.keras.layers.MaxPool2D(pool_size=2,\n",
138 | " strides=2, \n",
139 | " padding='SAME',\n",
140 | " name=\"S4\")(conv2)\n",
141 | "\n",
142 | "# Flatten Layer\n",
143 | "flatten = tf.keras.layers.Flatten(name=\"FLATTEN\")(max_pool2)\n",
144 | "\n",
145 | "\n",
146 | "# First Fully Connected Layer\n",
147 | "fully_connected1 = tf.keras.layers.Dense(units=120,\n",
148 | " activation=\"relu\",\n",
149 | " name=\"F5\")(flatten)\n",
150 | "\n",
151 | "# Second Fully Connected Layer\n",
152 | "fully_connected2 = tf.keras.layers.Dense(units=84,\n",
153 | " activation=\"relu\",\n",
154 | " name=\"F6\")(fully_connected1)\n",
155 | "\n",
156 | "# Final Fully Connected Layer\n",
157 | "final_model_output = tf.keras.layers.Dense(units=10,\n",
158 | " activation=\"softmax\",\n",
159 | " name=\"OUTPUT\"\n",
160 | " )(fully_connected2)\n",
161 | " \n",
162 | "\n",
163 | "model = tf.keras.Model(inputs= input_data, outputs=final_model_output)"
164 | ]
165 | },
166 | {
167 | "cell_type": "markdown",
168 | "metadata": {
169 | "colab_type": "text",
170 | "id": "GsQLNGNQaueu"
171 | },
172 | "source": [
173 | "Next, we will compile the model with the sparse categorical cross-entropy loss and the ADAM optimizer.\n",
174 | "Then, we'll display the summary"
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": null,
180 | "metadata": {
181 | "colab": {},
182 | "colab_type": "code",
183 | "executionInfo": {
184 | "elapsed": 3996,
185 | "status": "ok",
186 | "timestamp": 1601103012206,
187 | "user": {
188 | "displayName": "Alexia Audevart",
189 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
190 | "userId": "09723170522858788865"
191 | },
192 | "user_tz": -120
193 | },
194 | "id": "KhMHUZi7aueu"
195 | },
196 | "outputs": [],
197 | "source": [
198 | "model.compile(\n",
199 | " optimizer=\"adam\", \n",
200 | " loss=\"sparse_categorical_crossentropy\",\n",
201 | " metrics=[\"accuracy\"]\n",
202 | ")"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {
209 | "colab": {
210 | "base_uri": "https://localhost:8080/",
211 | "height": 485
212 | },
213 | "colab_type": "code",
214 | "executionInfo": {
215 | "elapsed": 476,
216 | "status": "ok",
217 | "timestamp": 1601103118658,
218 | "user": {
219 | "displayName": "Alexia Audevart",
220 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
221 | "userId": "09723170522858788865"
222 | },
223 | "user_tz": -120
224 | },
225 | "id": "Ug68Q9wRauex",
226 | "outputId": "084e899e-84b3-43ef-c91f-622dca6d5dc2"
227 | },
228 | "outputs": [],
229 | "source": [
230 | "model.summary()"
231 | ]
232 | },
233 | {
234 | "cell_type": "markdown",
235 | "metadata": {
236 | "colab_type": "text",
237 | "id": "EaY8p0wAaue0"
238 | },
239 | "source": [
240 | "## Add TensorBoard Callback & Fit the model\n",
241 | "\n",
242 | "We will create a timestamped subdirectory for each runs."
243 | ]
244 | },
245 | {
246 | "cell_type": "code",
247 | "execution_count": null,
248 | "metadata": {
249 | "colab": {},
250 | "colab_type": "code",
251 | "executionInfo": {
252 | "elapsed": 540,
253 | "status": "ok",
254 | "timestamp": 1601103120266,
255 | "user": {
256 | "displayName": "Alexia Audevart",
257 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
258 | "userId": "09723170522858788865"
259 | },
260 | "user_tz": -120
261 | },
262 | "id": "DOFGJsdNaue0"
263 | },
264 | "outputs": [],
265 | "source": [
266 | "log_dir=\"logs/experiment\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")"
267 | ]
268 | },
269 | {
270 | "cell_type": "markdown",
271 | "metadata": {
272 | "colab_type": "text",
273 | "id": "lSBNaKMiaue2"
274 | },
275 | "source": [
276 | "Next, we will instantiate a TensorBoard callback and pass it to the fit method. All logs during the training phase will be stored in this directory and can be able to view instantly in TensorBoard."
277 | ]
278 | },
279 | {
280 | "cell_type": "code",
281 | "execution_count": null,
282 | "metadata": {
283 | "colab": {
284 | "base_uri": "https://localhost:8080/",
285 | "height": 289
286 | },
287 | "colab_type": "code",
288 | "executionInfo": {
289 | "elapsed": 203610,
290 | "status": "ok",
291 | "timestamp": 1601103325170,
292 | "user": {
293 | "displayName": "Alexia Audevart",
294 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
295 | "userId": "09723170522858788865"
296 | },
297 | "user_tz": -120
298 | },
299 | "id": "jWStwrXoaue3",
300 | "outputId": "fc8d67ed-b00f-4b05-e35b-95af3c0b82d4"
301 | },
302 | "outputs": [],
303 | "source": [
304 | "tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, \n",
305 | " write_images=True,\n",
306 | " histogram_freq=1 )\n",
307 | "\n",
308 | "model.fit(x=x_train, \n",
309 | " y=y_train, \n",
310 | " epochs=5,\n",
311 | " validation_data=(x_test, y_test), \n",
312 | " callbacks=[tensorboard_callback])"
313 | ]
314 | },
315 | {
316 | "cell_type": "markdown",
317 | "metadata": {
318 | "colab_type": "text",
319 | "id": "jSECLsu-aue5"
320 | },
321 | "source": [
322 | "## Start TensorBoard\n",
323 | "\n",
324 | "We then start the Tensorboard application by running the command:\n",
325 | "\n",
326 | "`$ tensorboard --logdir=\"logs\"`\n",
327 | "\n",
328 | "Then we navigate our browser to the following link:\n",
329 | "\n",
330 | "`http://127.0.0.0:6006`\n",
331 | "\n",
332 | "We can specify a different port if needed by passing for example a `--port 6007` command (for running on port 6007).\n",
333 | "\n",
334 | "We can also start TensorBoard within the notebook through the `%tensorboard --logdir=\"logs\"` command line \n",
335 | "\n",
336 | "> Note that TensorBoard will be viewable _*as*_ your program is running."
337 | ]
338 | },
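339 | {
340 | "cell_type": "markdown",
341 | "metadata": {},
342 | "source": [
343 | "A minimal sketch of the in-notebook option (an illustrative addition, not part of the original recipe); it assumes TensorBoard is installed alongside TensorFlow:"
344 | ]
345 | },
346 | {
347 | "cell_type": "code",
348 | "execution_count": null,
349 | "metadata": {},
350 | "outputs": [],
351 | "source": [
352 | "# Illustrative sketch: load the TensorBoard notebook extension and start it inline.\n",
353 | "%load_ext tensorboard\n",
354 | "%tensorboard --logdir logs"
355 | ]
356 | },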
339 | {
340 | "cell_type": "markdown",
341 | "metadata": {
342 | "colab_type": "text",
343 | "id": "H1ZzQz77aue6"
344 | },
345 | "source": [
346 | "## Visualize scalars, distributions, images and histograms\n",
347 | "We can quickly and easily visualize and compare metrics of several experiments during the model training through the TensorBoard's scalars view. By default, TensorBoard writes the metrics and losses every epoch. We can update this frequency by batch using the following argument \n",
348 | "`update_freq='batch'`. \n",
349 | "\n",
350 | "We can also visualize model weights as image with the argument `write_images=True` or display bias and weights with histograms (computation every epoch) using `histogram_freq=1 `.\n"
351 | ]
352 | },
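353 | {
354 | "cell_type": "markdown",
355 | "metadata": {},
356 | "source": [
357 | "A minimal sketch of a callback configured with these arguments (an illustrative addition, not part of the original recipe; the variable name `detailed_tb_callback` is arbitrary):"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": null,
363 | "metadata": {},
364 | "outputs": [],
365 | "source": [
366 | "# Illustrative sketch: write metrics every batch, log weight images,\n",
367 | "# and compute histograms once per epoch.\n",
368 | "detailed_tb_callback = tf.keras.callbacks.TensorBoard(\n",
369 | "    log_dir=log_dir,\n",
370 | "    update_freq='batch',\n",
371 | "    write_images=True,\n",
372 | "    histogram_freq=1)"
373 | ]
374 | },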
353 | {
354 | "cell_type": "markdown",
355 | "metadata": {
356 | "colab_type": "text",
357 | "id": "B-RF2_24aue6"
358 | },
359 | "source": [
360 | "Here the screenshot of the scalars view:\n",
361 | "\n",
362 | ""
363 | ]
364 | },
365 | {
366 | "cell_type": "markdown",
367 | "metadata": {
368 | "colab_type": "text",
369 | "id": "38mJ6ttOaue7"
370 | },
371 | "source": [
372 | "Here we show how to visualize weights and bias with a histogram summary.\n",
373 | "\n",
374 | ""
375 | ]
376 | },
377 | {
378 | "cell_type": "markdown",
379 | "metadata": {
380 | "colab_type": "text",
381 | "id": "cvYGQtg4aue7"
382 | },
383 | "source": [
384 | "## Visualize TensorFlow model\n",
385 | "The TensorFlow's Graphs dashboard shows the model using different views. \n",
386 | "\n",
387 | "This dashboard allows to visualize the op-level graph but also the conceptual graph that displays only the Keras model without extra edges to other computation nodes.\n",
388 | "\n",
389 | "These views allow to quickly examinate and compare our intended design and how TensorFlow understands the model structure.\n",
390 | "\n",
391 | "Here we show how to visualize the op-level graph.\n",
392 | "\n",
393 | ""
394 | ]
395 | },
396 | {
397 | "cell_type": "markdown",
398 | "metadata": {
399 | "colab_type": "text",
400 | "id": "kIB8wVRcaue8"
401 | },
402 | "source": [
403 | "## Use file writer\n",
404 | "\n",
405 | "Create a FileWriter for the timestamped log directory and write the top ten images. "
406 | ]
407 | },
408 | {
409 | "cell_type": "code",
410 | "execution_count": null,
411 | "metadata": {
412 | "colab": {},
413 | "colab_type": "code",
414 | "executionInfo": {
415 | "elapsed": 169507,
416 | "status": "ok",
417 | "timestamp": 1601103325174,
418 | "user": {
419 | "displayName": "Alexia Audevart",
420 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
421 | "userId": "09723170522858788865"
422 | },
423 | "user_tz": -120
424 | },
425 | "id": "jP4pvxk7aue8"
426 | },
427 | "outputs": [],
428 | "source": [
429 | "\n",
430 | "file_writer = tf.summary.create_file_writer(log_dir)\n",
431 | "\n",
432 | "with file_writer.as_default():\n",
433 | " # Reshape the images and write the image summary\n",
434 | " images = np.reshape(x_train[0:10], (-1, 32, 32, 1))\n",
435 | " tf.summary.image(\"10 training data examples\", images, max_outputs=10, step=0)"
436 | ]
437 | },
438 | {
439 | "cell_type": "markdown",
440 | "metadata": {
441 | "colab_type": "text",
442 | "id": "YsNlrmd7aue-"
443 | },
444 | "source": [
445 | "Here we show how to visualize the top ten images.\n",
446 | "\n",
447 | "\n"
448 | ]
449 | }
450 | ],
451 | "metadata": {
452 | "colab": {
453 | "name": "01_using_tensorboard.ipynb",
454 | "provenance": []
455 | },
456 | "kernelspec": {
457 | "display_name": "Python 3",
458 | "language": "python",
459 | "name": "python3"
460 | },
461 | "language_info": {
462 | "codemirror_mode": {
463 | "name": "ipython",
464 | "version": 3
465 | },
466 | "file_extension": ".py",
467 | "mimetype": "text/x-python",
468 | "name": "python",
469 | "nbconvert_exporter": "python",
470 | "pygments_lexer": "ipython3",
471 | "version": "3.7.4"
472 | }
473 | },
474 | "nbformat": 4,
475 | "nbformat_minor": 4
476 | }
477 |
--------------------------------------------------------------------------------
/ch3/02-keras-sequential_api.ipynb:
--------------------------------------------------------------------------------
1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"accelerator":"GPU","colab":{"name":"02-keras-sequential_api.ipynb","provenance":[],"collapsed_sections":[]},"kernelspec":{"display_name":"tf2","language":"python","name":"tf2"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.7.7"}},"cells":[{"cell_type":"markdown","metadata":{"id":"QSTTcZehB1Vo"},"source":["# Using Keras Sequential API\n","-----------------------------\n","\n","`keras` is the implementation of the [Keras API specification](https://keras.io)"]},{"cell_type":"code","metadata":{"id":"ncw0BcFJBu5W","executionInfo":{"status":"ok","timestamp":1605129327775,"user_tz":-60,"elapsed":1862,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"4f4f8633-1346-48e9-9f85-8bf890584db5","colab":{"base_uri":"https://localhost:8080/","height":35}},"source":["import keras\n","\n","keras.__version__"],"execution_count":null,"outputs":[{"output_type":"execute_result","data":{"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"},"text/plain":["'2.4.3'"]},"metadata":{"tags":[]},"execution_count":1}]},{"cell_type":"markdown","metadata":{"id":"tma7DTVUIfbJ"},"source":["`tf.keras` is the TensorFlow's implementation of the [Keras API specification](https://keras.io)\n","\n","`tf.keras` can run any Keras-compatible code.\n","\n","Be careful that `tf.keras` version in the latest [TensorFlow](https://www.tensorflow.org/) release might not be the same as the latest `keras`version from [PyPi](https://pypi.org/).\n","\n"]},{"cell_type":"code","metadata":{"id":"Z9QsWXt2CAQU","executionInfo":{"status":"ok","timestamp":1605129327776,"user_tz":-60,"elapsed":1854,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"9280e7c7-422a-4152-d470-f5384b0ac28c","colab":{"base_uri":"https://localhost:8080/","height":35}},"source":["import tensorflow as tf\n","from tensorflow import keras\n","\n","keras.__version__"],"execution_count":null,"outputs":[{"output_type":"execute_result","data":{"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"},"text/plain":["'2.4.0'"]},"metadata":{"tags":[]},"execution_count":2}]},{"cell_type":"code","metadata":{"id":"0uRK-joUJUgs"},"source":["import tensorflow as tf\n","from tensorflow.keras.layers import Dense"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"0Y-xNOr8B4G4"},"source":["Let's start with creating a sequential model by passing a list of layer instances as an array to the constructor"]},{"cell_type":"code","metadata":{"id":"t6dI0-NSLnbw"},"source":["model = tf.keras.Sequential([\n"," # Add a fully connected layer with 1024 units to the model\n"," tf.keras.layers.Dense(1024, input_dim=64),\n"," # Add an activation layer with TanH activation function\n"," tf.keras.layers.Activation('tanh'),\n"," # Add a fully connected layer with 256 units to the model\n"," tf.keras.layers.Dense(256),\n"," # Add an activation layer with ReLU activation function\n"," tf.keras.layers.Activation('relu'),\n"," # Add a fully connected layer with 10 units to the model\n"," tf.keras.layers.Dense(10),\n"," # Add an activation layer with softmax activation function\n"," 
tf.keras.layers.Activation('softmax')\n","])"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"TTlGOFIhcoOW"},"source":["Let's check out what the model summary looks like:"]},{"cell_type":"code","metadata":{"id":"iGhI-QfiLpBW","executionInfo":{"status":"ok","timestamp":1605129328549,"user_tz":-60,"elapsed":2607,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"29f3dcd6-0742-41c8-93f3-44602614c8df","colab":{"base_uri":"https://localhost:8080/"}},"source":["# Display Model Summary\n","model.summary()"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Model: \"sequential\"\n","_________________________________________________________________\n","Layer (type) Output Shape Param # \n","=================================================================\n","dense (Dense) (None, 1024) 66560 \n","_________________________________________________________________\n","activation (Activation) (None, 1024) 0 \n","_________________________________________________________________\n","dense_1 (Dense) (None, 256) 262400 \n","_________________________________________________________________\n","activation_1 (Activation) (None, 256) 0 \n","_________________________________________________________________\n","dense_2 (Dense) (None, 10) 2570 \n","_________________________________________________________________\n","activation_2 (Activation) (None, 10) 0 \n","=================================================================\n","Total params: 331,530\n","Trainable params: 331,530\n","Non-trainable params: 0\n","_________________________________________________________________\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"5kEhqp4uMLls"},"source":["Another way to create a sequential model is to instantiate a Sequential class and after that add layers via the .add() method.\n"]},{"cell_type":"code","metadata":{"id":"NOSEP-YCLsLr"},"source":["model = tf.keras.Sequential()\n","# Add a fully connected layer with 1024 units to the model\n","model.add(tf.keras.layers.Dense(1024, input_dim=64))\n","# Add an activation layer with TanH activation function\n","model.add(tf.keras.layers.Activation('tanH'))\n","# Add a fully connected layer with 256 units to the model\n","model.add(tf.keras.layers.Dense(256))\n","# Add an activation layer with ReLU activation function\n","model.add(tf.keras.layers.Activation('relu'))\n","# Add a fuly connected Layer with 10 units to the model\n","model.add(tf.keras.layers.Dense(10))\n","# Add an activation layer with softmax activation function\n","model.add(tf.keras.layers.Activation('softmax'))"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"p5zbDDTtcl66"},"source":["Let's check out what the model summary looks like:"]},{"cell_type":"code","metadata":{"id":"iNYmGGSQMoi_","executionInfo":{"status":"ok","timestamp":1605129328551,"user_tz":-60,"elapsed":2597,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"41d5eca0-9454-48a7-d288-d146283a9661","colab":{"base_uri":"https://localhost:8080/"}},"source":["model.summary()"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Model: \"sequential_1\"\n","_________________________________________________________________\n","Layer (type) Output Shape Param # 
\n","=================================================================\n","dense_3 (Dense) (None, 1024) 66560 \n","_________________________________________________________________\n","activation_3 (Activation) (None, 1024) 0 \n","_________________________________________________________________\n","dense_4 (Dense) (None, 256) 262400 \n","_________________________________________________________________\n","activation_4 (Activation) (None, 256) 0 \n","_________________________________________________________________\n","dense_5 (Dense) (None, 10) 2570 \n","_________________________________________________________________\n","activation_5 (Activation) (None, 10) 0 \n","=================================================================\n","Total params: 331,530\n","Trainable params: 331,530\n","Non-trainable params: 0\n","_________________________________________________________________\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"2PKCbCfLsuek"},"source":["Let us take a closer look at the layer configuration :\n","- The `activation function` decides, whether a neuron should be activated or not"]},{"cell_type":"code","metadata":{"id":"i3vn3BxpqzD7","executionInfo":{"status":"ok","timestamp":1605129328551,"user_tz":-60,"elapsed":2591,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"c22a6829-feb9-4e59-d1db-2b5cde141cac","colab":{"base_uri":"https://localhost:8080/"}},"source":["# Creation of a dense layer with a sigmoid activation function:\n","Dense(256, activation='sigmoid')\n","# Or:\n","Dense(256, activation=tf.keras.activations.sigmoid)"],"execution_count":null,"outputs":[{"output_type":"execute_result","data":{"text/plain":[""]},"metadata":{"tags":[]},"execution_count":8}]},{"cell_type":"markdown","metadata":{"id":"7ZimCulms6us"},"source":["- The initial weights are defined by setting `kernel_initializer` and `bias_initializer` parameters. 
"]},{"cell_type":"code","metadata":{"id":"5rBnQ-c4tPQ7","executionInfo":{"status":"ok","timestamp":1605129328552,"user_tz":-60,"elapsed":2586,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"2fc41a1f-1384-4751-c1a9-759b5796797d","colab":{"base_uri":"https://localhost:8080/"}},"source":["# A dense layer with a kernel initialized to a truncated normal distribution:\n","Dense(256, kernel_initializer='random_normal')\n","\n"," \n","# A dense layer with a bias vector initialized with a constant value of 5.0:\n","Dense(256, bias_initializer=tf.keras.initializers.Constant(value=5))\n"," "],"execution_count":null,"outputs":[{"output_type":"execute_result","data":{"text/plain":[""]},"metadata":{"tags":[]},"execution_count":9}]},{"cell_type":"markdown","metadata":{"id":"I9YuvykwtSF7"},"source":["- The `kernel_regularizer` and `bias_regularizer` are regularizers"]},{"cell_type":"code","metadata":{"id":"ocBu4NxGpzni","executionInfo":{"status":"ok","timestamp":1605129328552,"user_tz":-60,"elapsed":2580,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"7673f529-c8da-43ce-aad2-bbcadf7fc692","colab":{"base_uri":"https://localhost:8080/"}},"source":["# A dense layer with L1 regularization of factor 0.01 applied to the kernel matrix:\n","Dense(256, kernel_regularizer=tf.keras.regularizers.l1(0.01))\n"," \n","# A dense layer with L2 regularization of factor 0.01 applied to the bias vector:\n","Dense(256, bias_regularizer=tf.keras.regularizers.l2(0.01))\n"," "],"execution_count":null,"outputs":[{"output_type":"execute_result","data":{"text/plain":[""]},"metadata":{"tags":[]},"execution_count":10}]},{"cell_type":"markdown","metadata":{"id":"l-8gkC5H1syu"},"source":["Specifying the input shape\n","The argument `input_dim` doesn't contain the `batch_size` because Keras ignores it. 
The model should be able to deal with any batch size."]},{"cell_type":"code","metadata":{"id":"MReItyAVts7E","executionInfo":{"status":"ok","timestamp":1605129328553,"user_tz":-60,"elapsed":2575,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"6e6e866e-5f5e-4cdd-f21e-f2de6ed58045","colab":{"base_uri":"https://localhost:8080/"}},"source":["Dense(256, input_dim=(64))"],"execution_count":null,"outputs":[{"output_type":"execute_result","data":{"text/plain":[""]},"metadata":{"tags":[]},"execution_count":11}]},{"cell_type":"markdown","metadata":{"id":"1XHIECHO3e7j"},"source":["However, we can force the batch_size with the `batch_size` argument."]},{"cell_type":"code","metadata":{"id":"ePmRexBVtzSy","executionInfo":{"status":"ok","timestamp":1605129328553,"user_tz":-60,"elapsed":2570,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"f3b43541-eff6-42de-b4b1-eb3c26f8a7b6","colab":{"base_uri":"https://localhost:8080/"}},"source":["Dense(256, input_dim=(64), batch_size=10)\n"],"execution_count":null,"outputs":[{"output_type":"execute_result","data":{"text/plain":[""]},"metadata":{"tags":[]},"execution_count":12}]},{"cell_type":"markdown","metadata":{"id":"C--XDweTgkl8"},"source":["Creation of the 3 toy datasets"]},{"cell_type":"code","metadata":{"id":"dlvngzcFgj_w"},"source":["import numpy as np\n","\n","data = np.random.random((2000, 64))\n","labels = np.random.random((2000, 10))\n","\n","val_data = np.random.random((500, 64))\n","val_labels = np.random.random((500, 10))\n","\n","test_data = np.random.random((500, 64))\n","test_labels = np.random.random((500, 10))"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"ozFLLCuAQFlI"},"source":["Compilation"]},{"cell_type":"code","metadata":{"id":"yviWnGN68zin"},"source":["# Compile a model using adam optimizer \n","# for categorical cross entropy loss and categorical accuracy metric.\n","model.compile(\n"," optimizer=\"adam\", \n"," loss=\"categorical_crossentropy\",\n"," metrics=[\"accuracy\"]\n",")\n"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"yiMXaSlqQGNj"},"source":["Training from Numpy data :"]},{"cell_type":"code","metadata":{"id":"SfSZ886LQGZJ","executionInfo":{"status":"ok","timestamp":1605129331848,"user_tz":-60,"elapsed":5851,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"bfdb5198-6659-492e-e7c5-c7bb2c01982b","colab":{"base_uri":"https://localhost:8080/"}},"source":["model.fit(data, labels, epochs=10, batch_size=50,\n"," validation_data=(val_data, val_labels))\n"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Epoch 1/10\n","40/40 [==============================] - 0s 5ms/step - loss: 43.4399 - accuracy: 0.0915 - val_loss: 117.7406 - val_accuracy: 0.0840\n","Epoch 2/10\n","40/40 [==============================] - 0s 3ms/step - loss: 258.7852 - accuracy: 0.0915 - val_loss: 226.4188 - val_accuracy: 0.1120\n","Epoch 3/10\n","40/40 [==============================] - 0s 3ms/step - loss: 203.4489 - accuracy: 0.1060 - val_loss: 297.2030 - val_accuracy: 0.0940\n","Epoch 4/10\n","40/40 [==============================] - 0s 3ms/step - loss: 614.5685 - accuracy: 
0.1105 - val_loss: 1034.5183 - val_accuracy: 0.1120\n","Epoch 5/10\n","40/40 [==============================] - 0s 3ms/step - loss: 1314.0310 - accuracy: 0.1000 - val_loss: 1319.0709 - val_accuracy: 0.0880\n","Epoch 6/10\n","40/40 [==============================] - 0s 3ms/step - loss: 1916.9198 - accuracy: 0.0925 - val_loss: 2958.2812 - val_accuracy: 0.0880\n","Epoch 7/10\n","40/40 [==============================] - 0s 2ms/step - loss: 3135.3259 - accuracy: 0.0840 - val_loss: 3423.9438 - val_accuracy: 0.1040\n","Epoch 8/10\n","40/40 [==============================] - 0s 2ms/step - loss: 3564.3628 - accuracy: 0.1040 - val_loss: 4568.0073 - val_accuracy: 0.0980\n","Epoch 9/10\n","40/40 [==============================] - 0s 2ms/step - loss: 5086.2075 - accuracy: 0.0940 - val_loss: 5656.1157 - val_accuracy: 0.1100\n","Epoch 10/10\n","40/40 [==============================] - 0s 3ms/step - loss: 5816.4668 - accuracy: 0.0980 - val_loss: 7082.7422 - val_accuracy: 0.1080\n"],"name":"stdout"},{"output_type":"execute_result","data":{"text/plain":[""]},"metadata":{"tags":[]},"execution_count":15}]},{"cell_type":"markdown","metadata":{"id":"6PjMbRce_z_-"},"source":["Evaluation: returns the loss value & metrics values for the model in test mode."]},{"cell_type":"code","metadata":{"id":"7AcBiiQm_eJI","executionInfo":{"status":"ok","timestamp":1605129331849,"user_tz":-60,"elapsed":5844,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"b2e52e74-90ff-42bf-dc87-cd41498facf3","colab":{"base_uri":"https://localhost:8080/"}},"source":["model.evaluate(test_data, test_labels, batch_size=50)"],"execution_count":null,"outputs":[{"output_type":"stream","text":["10/10 [==============================] - 0s 2ms/step - loss: 6922.4644 - accuracy: 0.1080\n"],"name":"stdout"},{"output_type":"execute_result","data":{"text/plain":["[6922.46435546875, 0.1080000028014183]"]},"metadata":{"tags":[]},"execution_count":16}]},{"cell_type":"markdown","metadata":{"id":"yIbJejXGHXc2"},"source":["Prediction"]},{"cell_type":"code","metadata":{"id":"_zvwRqqq_7PQ","executionInfo":{"status":"ok","timestamp":1605129332038,"user_tz":-60,"elapsed":6027,"user":{"displayName":"Alexia Audevart","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64","userId":"09723170522858788865"}},"outputId":"a6a6500d-aebf-4724-bd9a-cc140b94ce18","colab":{"base_uri":"https://localhost:8080/"}},"source":["# Prediction\n","result = model.predict(test_data, batch_size=50)\n","print(result.shape)"],"execution_count":null,"outputs":[{"output_type":"stream","text":["(500, 10)\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"axvHIjoZs92_"},"source":[""],"execution_count":null,"outputs":[]}]}
--------------------------------------------------------------------------------
/ch6/02_Implementing_an_Operational_Gate/02_gates.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Implementing Gates\n",
8 | "\n",
9 | "This function shows how to implement various gates in TensorFlow.\n",
10 | "\n",
11 | "One gate will be one operation with a variable and the input tensor of our model. We will ask TensorFlow \n",
12 | "to change the variable based on our loss function!"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {},
19 | "outputs": [],
20 | "source": [
21 | "import tensorflow as tf"
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {},
27 | "source": [
28 | "### Gate 1\n",
29 | "\n",
30 | "Create a multiplication gate: $f(x) = a * x$\n",
31 | "```\n",
32 | " a --\n",
33 | " |\n",
34 | " |---- (multiply) --> output\n",
35 | " |\n",
36 | " x --\n",
37 | "```"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "metadata": {},
44 | "outputs": [
45 | {
46 | "name": "stdout",
47 | "output_type": "stream",
48 | "text": [
49 | "WARNING:tensorflow:\n",
50 | "The following Variables were used a Lambda layer's call (lambda), but\n",
51 | "are not present in its tracked objects:\n",
52 | " \n",
53 | "It is possible that this is intended behavior, but it is more likely\n",
54 | "an omission. This is a strong indication that this layer should be\n",
55 | "formulated as a subclassed Layer rather than a Lambda layer.\n",
56 | "Model: \"gate_1\"\n",
57 | "_________________________________________________________________\n",
58 | "Layer (type) Output Shape Param # \n",
59 | "=================================================================\n",
60 | "input_1 (InputLayer) [(None, 1)] 0 \n",
61 | "_________________________________________________________________\n",
62 | "lambda (Lambda) (None, 1) 0 \n",
63 | "=================================================================\n",
64 | "Total params: 0\n",
65 | "Trainable params: 0\n",
66 | "Non-trainable params: 0\n",
67 | "_________________________________________________________________\n",
68 | "None\n",
69 | "Optimizing a Multiplication Gate Output to 50.\n",
70 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
71 | "Step: 0 ==> 7.0 * 5.0 = 35.0\n",
72 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
73 | "Step: 1 ==> 8.5 * 5.0 = 42.5\n",
74 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
75 | "Step: 2 ==> 9.25 * 5.0 = 46.25\n",
76 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
77 | "Step: 3 ==> 9.625 * 5.0 = 48.125\n",
78 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
79 | "Step: 4 ==> 9.8125 * 5.0 = 49.0625\n",
80 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
81 | "Step: 5 ==> 9.90625 * 5.0 = 49.53125\n",
82 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
83 | "Step: 6 ==> 9.953125 * 5.0 = 49.765625\n",
84 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
85 | "Step: 7 ==> 9.9765625 * 5.0 = 49.8828125\n",
86 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
87 | "Step: 8 ==> 9.98828125 * 5.0 = 49.94140625\n",
88 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_1:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
89 | "Step: 9 ==> 9.994140625 * 5.0 = 49.970703125\n"
90 | ]
91 | }
92 | ],
93 | "source": [
94 | "# Initialize variables and input data\n",
95 | "a = tf.Variable(4.)\n",
96 | "x_data = tf.keras.Input(shape=(1,))\n",
97 | "x_val = 5.\n",
98 | "\n",
99 | "\n",
100 | "# Add a layer which computes f(x) = a * x\n",
101 | "multiply_layer = tf.keras.layers.Lambda(lambda x:tf.multiply(a, x))\n",
102 | "outputs = multiply_layer(x_data)\n",
103 | "\n",
104 | "# Build the model\n",
105 | "model = tf.keras.Model(inputs=x_data, outputs=outputs, name=\"gate_1\")\n",
106 | "\n",
107 | "print(model.summary())\n",
108 | "\n",
109 | "# Optimizer\n",
110 | "optimizer=tf.keras.optimizers.SGD(0.01)\n",
111 | "\n",
112 | "\n",
113 | "# Run loop across gate\n",
114 | "print('Optimizing a Multiplication Gate Output to 50.')\n",
115 | "for i in range(10):\n",
116 | " \n",
117 | " # Open a GradientTape.\n",
118 | " with tf.GradientTape() as tape:\n",
119 | " \n",
120 | " # Forward pass.\n",
121 | " mult_output = model(x_val)\n",
122 | " \n",
123 | " # Loss value as the difference between\n",
124 | " # the output and a target value, 50.\n",
125 | " loss_value = tf.square(tf.subtract(mult_output, 50.))\n",
126 | " \n",
127 | " # Get gradients of loss with reference to the variable \"a\" to adjust.\n",
128 | " gradients = tape.gradient(loss_value, a)\n",
129 | " \n",
130 | " # Update the variable \"a\" of the model.\n",
131 | " optimizer.apply_gradients(zip([gradients], [a]))\n",
132 | " \n",
133 | " print(\"Step: {} ==> {} * {} = {}\".format(i, a.numpy(), x_val, a.numpy() * x_val))"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {},
140 | "outputs": [
141 | {
142 | "name": "stdout",
143 | "output_type": "stream",
144 | "text": [
145 | "Optimizing a Multiplication Gate Output to 50.\n",
146 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
147 | "Step: 0 ==> 7.0 * 5.0 = 35.0\n",
148 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
149 | "Step: 1 ==> 8.5 * 5.0 = 42.5\n",
150 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
151 | "Step: 2 ==> 9.25 * 5.0 = 46.25\n",
152 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
153 | "Step: 3 ==> 9.625 * 5.0 = 48.125\n",
154 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
155 | "Step: 4 ==> 9.8125 * 5.0 = 49.0625\n",
156 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
157 | "Step: 5 ==> 9.90625 * 5.0 = 49.53125\n",
158 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
159 | "Step: 6 ==> 9.953125 * 5.0 = 49.765625\n",
160 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
161 | "Step: 7 ==> 9.9765625 * 5.0 = 49.8828125\n",
162 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
163 | "Step: 8 ==> 9.98828125 * 5.0 = 49.94140625\n",
164 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_2:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
165 | "Step: 9 ==> 9.994140625 * 5.0 = 49.970703125\n"
166 | ]
167 | }
168 | ],
169 | "source": [
170 | "# Instead of using a lambda layer, we can also use a subclassed layer\n",
171 | "class MyCustomMultiplyLayer(tf.keras.layers.Layer):\n",
172 | " \n",
173 | " def __init__(self, units):\n",
174 | " super(MyCustomMultiplyLayer, self).__init__()\n",
175 | " self.units = units\n",
176 | " self.a = tf.Variable(4.)\n",
177 | "\n",
178 | " def call(self, inputs):\n",
179 | " return inputs * self.a\n",
180 | "\n",
181 | "\n",
182 | "# Initialize variables\n",
183 | "x_data = tf.keras.Input(dtype=tf.float32, shape=(1,))\n",
184 | "a = tf.Variable(4, dtype=tf.float32)\n",
185 | "\n",
186 | "# Add a layer which computes f(x) = a * x\n",
187 | "multiply_layer = MyCustomMultiplyLayer(units=1)\n",
188 | "outputs = multiply_layer(x_data)\n",
189 | "\n",
190 | "# Build the model\n",
191 | "model = tf.keras.Model(inputs=x_data, outputs=outputs, name=\"gate_1\")\n",
192 | "\n",
193 | "#print(model.summary())\n",
194 | "\n",
195 | "# Optimizer\n",
196 | "optimizer=tf.keras.optimizers.SGD(0.01)\n",
197 | "\n",
198 | "# Run loop across gate\n",
199 | "print('Optimizing a Multiplication Gate Output to 50.')\n",
200 | "for i in range(10):\n",
201 | " \n",
202 | " # Open a GradientTape.\n",
203 | " with tf.GradientTape() as tape:\n",
204 | " \n",
205 | " # Forward pass.\n",
206 | " mult_output = model(5.)\n",
207 | " \n",
208 | " # Loss value as the difference between\n",
209 | " # the output and a target value, 50.\n",
210 | " loss_value = tf.square(tf.subtract(mult_output, 50.))\n",
211 | " \n",
212 | " # Get gradients of loss with reference to the variable \"a\" to adjust.\n",
213 | " gradients = tape.gradient(loss_value, multiply_layer.a)\n",
214 | " \n",
215 | " # Update the weights of the model.\n",
216 | " optimizer.apply_gradients(zip([gradients], [multiply_layer.a]))\n",
217 | " \n",
218 | " print(\"Step: {} ==> {} * {} = {}\".format(i, multiply_layer.a.numpy(), x_val, multiply_layer.a.numpy() * x_val))"
219 | ]
220 | },
221 | {
222 | "cell_type": "markdown",
223 | "metadata": {
224 | "pycharm": {
225 | "name": "#%% md\n"
226 | }
227 | },
228 | "source": [
229 | "### Gate 2\n",
230 | "\n",
231 | "Create a nested gate: $f(x) = a * x + b$\n",
232 | "\n",
233 | "```\n",
234 | " a --\n",
235 | " |\n",
236 | " |-- (multiply)--\n",
237 | " | |\n",
238 | " x -- |-- (add) --> output\n",
239 | " |\n",
240 | " b --\n",
241 | "```"
242 | ]
243 | },
244 | {
245 | "cell_type": "code",
246 | "execution_count": null,
247 | "metadata": {
248 | "pycharm": {
249 | "name": "#%%\n"
250 | }
251 | },
252 | "outputs": [
253 | {
254 | "name": "stdout",
255 | "output_type": "stream",
256 | "text": [
257 | "WARNING:tensorflow:\n",
258 | "The following Variables were used a Lambda layer's call (lambda_1), but\n",
259 | "are not present in its tracked objects:\n",
260 | " \n",
261 | "It is possible that this is intended behavior, but it is more likely\n",
262 | "an omission. This is a strong indication that this layer should be\n",
263 | "formulated as a subclassed Layer rather than a Lambda layer.\n",
264 | "WARNING:tensorflow:\n",
265 | "The following Variables were used a Lambda layer's call (lambda_2), but\n",
266 | "are not present in its tracked objects:\n",
267 | " \n",
268 | "It is possible that this is intended behavior, but it is more likely\n",
269 | "an omission. This is a strong indication that this layer should be\n",
270 | "formulated as a subclassed Layer rather than a Lambda layer.\n",
271 | "Model: \"gate_2\"\n",
272 | "_________________________________________________________________\n",
273 | "Layer (type) Output Shape Param # \n",
274 | "=================================================================\n",
275 | "input_3 (InputLayer) [(None, 1)] 0 \n",
276 | "_________________________________________________________________\n",
277 | "lambda_1 (Lambda) (None, 1) 0 \n",
278 | "_________________________________________________________________\n",
279 | "lambda_2 (Lambda) (None, 1) 0 \n",
280 | "=================================================================\n",
281 | "Total params: 0\n",
282 | "Trainable params: 0\n",
283 | "Non-trainable params: 0\n",
284 | "_________________________________________________________________\n",
285 | "None\n",
286 | "Optimizing two Gate Output to 50.\n",
287 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
288 | "Step: 0 ==> 5.400000095367432 * 5.0 + 1.8799999952316284= 28.880000472068787\n",
289 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
290 | "Step: 1 ==> 7.51200008392334 * 5.0 + 2.3024001121520996= 39.8624005317688\n",
291 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
292 | "Step: 2 ==> 8.52575969696045 * 5.0 + 2.5051522254943848= 45.13395071029663\n",
293 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
294 | "Step: 3 ==> 9.012364387512207 * 5.0 + 2.602473258972168= 47.6642951965332\n",
295 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
296 | "Step: 4 ==> 9.24593448638916 * 5.0 + 2.6491873264312744= 48.878859758377075\n",
297 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
298 | "Step: 5 ==> 9.358048439025879 * 5.0 + 2.671610116958618= 49.46185231208801\n",
299 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
300 | "Step: 6 ==> 9.411863327026367 * 5.0 + 2.682373046875= 49.741689682006836\n",
301 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
302 | "Step: 7 ==> 9.437694549560547 * 5.0 + 2.6875391006469727= 49.87601184844971\n",
303 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
304 | "Step: 8 ==> 9.450093269348145 * 5.0 + 2.690018892288208= 49.94048523902893\n",
305 | "WARNING:tensorflow:Model was constructed with shape Tensor(\"input_3:0\", shape=(None, 1), dtype=float32) for input (None, 1), but it was re-called on a Tensor with incompatible shape ().\n",
306 | "Step: 9 ==> 9.456045150756836 * 5.0 + 2.691209316253662= 49.97143507003784\n"
307 | ]
308 | }
309 | ],
310 | "source": [
311 | "# Initialize variables and input data\n",
312 | "x_data = tf.keras.Input(dtype=tf.float32, shape=(1,))\n",
313 | "x_val = 5.\n",
314 | "a = tf.Variable(1., dtype=tf.float32)\n",
315 | "b = tf.Variable(1., dtype=tf.float32)\n",
316 | "\n",
317 | "# Add a layer which computes f(x) = a * x\n",
318 | "multiply_layer = tf.keras.layers.Lambda(lambda x:tf.multiply(a, x))\n",
319 | "\n",
320 | "# Add a layer which computes f(x) = b + x\n",
321 | "add_layer = tf.keras.layers.Lambda(lambda x:tf.add(b, x))\n",
322 | "\n",
323 | "res = multiply_layer(x_data)\n",
324 | "outputs = add_layer(res)\n",
325 | "\n",
326 | "# Build the model\n",
327 | "model = tf.keras.Model(inputs=x_data, outputs=outputs, name=\"gate_2\")\n",
328 | "\n",
329 | "print(model.summary())\n",
330 | "\n",
331 | "# Optimizer\n",
332 | "optimizer=tf.keras.optimizers.SGD(0.01)\n",
333 | "\n",
334 | "# Run loop across gate\n",
335 | "print('Optimizing two Gate Output to 50.')\n",
336 | "for i in range(10):\n",
337 | " \n",
338 | " # Open a GradientTape.\n",
339 | " with tf.GradientTape(persistent=True) as tape:\n",
340 | " \n",
341 | " # Forward pass.\n",
342 | " two_gate_output = model(x_val)\n",
343 | " \n",
344 | " # Loss value as the difference between\n",
345 | " # the output and a target value, 50.\n",
346 | " loss_value = tf.square(tf.subtract(two_gate_output, 50.))\n",
347 | " \n",
348 | " # Get gradients of loss with reference to the variables \"a\" and \"b\" to adjust.\n",
349 | " gradients_a = tape.gradient(loss_value, a)\n",
350 | " gradients_b = tape.gradient(loss_value , b)\n",
351 | " \n",
352 | " # Update the variables \"a\" and \"b\" of the model.\n",
353 | " optimizer.apply_gradients(zip([gradients_a, gradients_b], [a, b]))\n",
354 | " \n",
355 | " print(\"Step: {} ==> {} * {} + {}= {}\".format(i, a.numpy(),\n",
356 | " x_val, b.numpy(),\n",
357 | " a.numpy() * x_val + b.numpy()))\n",
358 | " \n",
359 | " \n",
360 | " "
361 | ]
362 | }
363 | ],
364 | "metadata": {
365 | "kernelspec": {
366 | "display_name": "tf2",
367 | "language": "python",
368 | "name": "tf2"
369 | },
370 | "language_info": {
371 | "codemirror_mode": {
372 | "name": "ipython",
373 | "version": 3
374 | },
375 | "file_extension": ".py",
376 | "mimetype": "text/x-python",
377 | "name": "python",
378 | "nbconvert_exporter": "python",
379 | "pygments_lexer": "ipython3",
380 | "version": "3.7.7"
381 | },
382 | "pycharm": {
383 | "stem_cell": {
384 | "cell_type": "raw",
385 | "metadata": {
386 | "collapsed": false
387 | },
388 | "source": []
389 | }
390 | }
391 | },
392 | "nbformat": 4,
393 | "nbformat_minor": 4
394 | }
395 |
--------------------------------------------------------------------------------
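
A note on the `gate_2` cell in the notebook above: the stored output warns that the Lambda layers use Variables (`a` and `b`) that are not in their tracked objects and suggests a subclassed layer instead. The following is a minimal, self-contained sketch (not part of the notebook, assuming TensorFlow 2.x; `AffineGate` is a hypothetical name used only for illustration) of how the same f(x) = a * x + b gate could be written as a subclassed `tf.keras.layers.Layer`, so that both variables are tracked and a single `tape.gradient` call over `trainable_variables` suffices:

```python
import tensorflow as tf

# Sketch only: the a * x + b gate as one subclassed layer whose variables are
# tracked, which avoids the "untracked Variables in a Lambda layer" warning.
class AffineGate(tf.keras.layers.Layer):  # hypothetical name, for illustration
    def __init__(self):
        super().__init__()
        self.a = tf.Variable(1.)
        self.b = tf.Variable(1.)

    def call(self, inputs):
        return self.a * inputs + self.b

gate = AffineGate()
optimizer = tf.keras.optimizers.SGD(0.01)
x_val = tf.constant(5.)

print('Optimizing two Gate Output to 50.')
for step in range(10):
    with tf.GradientTape() as tape:
        output = gate(x_val)            # forward pass: a * x + b
        loss = tf.square(output - 50.)  # squared distance to the target 50
    # Both variables are tracked by the layer, so one gradient call is enough.
    grads = tape.gradient(loss, gate.trainable_variables)
    optimizer.apply_gradients(zip(grads, gate.trainable_variables))
    print("Step: {} ==> {} * {} + {} = {}".format(
        step, gate.a.numpy(), x_val.numpy(), gate.b.numpy(),
        (gate.a * x_val + gate.b).numpy()))
```

Under these assumptions the loop converges toward the same values as the Lambda-based version, and the layer now reports `a` and `b` as trainable weights.
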
/ch12/05_Parallelizing_TensorFlow/05_parallelizing_tensorflow.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "KItfUAGxRzZx"
8 | },
9 | "source": [
10 | "# Parallelizing TensorFlow with Mirrored Strategy\n",
11 | "\n",
12 | "We will show how to use TensorFlow Distributed API"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {
19 | "colab": {},
20 | "colab_type": "code",
21 | "executionInfo": {
22 | "elapsed": 2868,
23 | "status": "ok",
24 | "timestamp": 1601104203658,
25 | "user": {
26 | "displayName": "Alexia Audevart",
27 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
28 | "userId": "09723170522858788865"
29 | },
30 | "user_tz": -120
31 | },
32 | "id": "ReNFcbMcRzZy"
33 | },
34 | "outputs": [],
35 | "source": [
36 | "import tensorflow as tf\n",
37 | "import tensorflow_datasets as tfds"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {
43 | "colab_type": "text",
44 | "id": "hKtzSUYlWfku"
45 | },
46 | "source": [
47 | "We will create two virtual GPUs"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "metadata": {
54 | "colab": {},
55 | "colab_type": "code",
56 | "executionInfo": {
57 | "elapsed": 4234,
58 | "status": "ok",
59 | "timestamp": 1601104205028,
60 | "user": {
61 | "displayName": "Alexia Audevart",
62 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
63 | "userId": "09723170522858788865"
64 | },
65 | "user_tz": -120
66 | },
67 | "id": "KMT_ulnFRzZ1"
68 | },
69 | "outputs": [],
70 | "source": [
71 | "# Create two virtual GPUs\n",
72 | "gpu_devices = tf.config.list_physical_devices('GPU')\n",
73 | "if gpu_devices:\n",
74 | " try:\n",
75 | " tf.config.experimental.set_virtual_device_configuration(gpu_devices[0],\n",
76 | " [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024),\n",
77 | " tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024) ])\n",
78 | " except RuntimeError as e:\n",
79 | " # Memory growth cannot be modified after GPU has been initialized\n",
80 | " print(e)"
81 | ]
82 | },
83 | {
84 | "cell_type": "markdown",
85 | "metadata": {
86 | "colab_type": "text",
87 | "id": "1_4Qe4j6pM93"
88 | },
89 | "source": [
90 | "Next, we will load the MNIST dataset via the `tensorflow_datasets` API."
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {
97 | "colab": {
98 | "base_uri": "https://localhost:8080/",
99 | "height": 211,
100 | "referenced_widgets": [
101 | "23b97c9cd41e4b92b571c7a3547e373c",
102 | "f6e0f1091af043548de0b55203aa4ac5",
103 | "b531948290e0427d8249c410ec8a2db2",
104 | "52238e0518364cf2adf113bb24a91e68",
105 | "42294c4dee554001a5878572d85f7343",
106 | "6ca3b4d177084271aab1437bd4d88eca",
107 | "d4982937522d47508a0055971927d075",
108 | "5e57b7c347b74fd097924786f170b3df"
109 | ]
110 | },
111 | "colab_type": "code",
112 | "executionInfo": {
113 | "elapsed": 9817,
114 | "status": "ok",
115 | "timestamp": 1601104210613,
116 | "user": {
117 | "displayName": "Alexia Audevart",
118 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
119 | "userId": "09723170522858788865"
120 | },
121 | "user_tz": -120
122 | },
123 | "id": "paguPXOPRzZ6",
124 | "outputId": "26b871fc-20d3-4242-8de3-c4c32101f175"
125 | },
126 | "outputs": [],
127 | "source": [
128 | "datasets, info = tfds.load('mnist', with_info=True, as_supervised=True)\n",
129 | "mnist_train, mnist_test = datasets['train'], datasets['test']"
130 | ]
131 | },
132 | {
133 | "cell_type": "markdown",
134 | "metadata": {
135 | "colab_type": "text",
136 | "id": "9zk1S2q-pgzM"
137 | },
138 | "source": [
139 | "Then, we will prepare the data. "
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": null,
145 | "metadata": {
146 | "colab": {},
147 | "colab_type": "code",
148 | "executionInfo": {
149 | "elapsed": 9816,
150 | "status": "ok",
151 | "timestamp": 1601104210614,
152 | "user": {
153 | "displayName": "Alexia Audevart",
154 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
155 | "userId": "09723170522858788865"
156 | },
157 | "user_tz": -120
158 | },
159 | "id": "zm0yCf2iRzZ8"
160 | },
161 | "outputs": [],
162 | "source": [
163 | "def normalize_img(image, label):\n",
164 | " \"\"\"Normalizes images: `uint8` -> `float32`.\"\"\"\n",
165 | " return tf.cast(image, tf.float32) / 255., label\n",
166 | "\n",
167 | "mnist_train = mnist_train.map(\n",
168 | " normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n",
169 | "mnist_train = mnist_train.cache()\n",
170 | "mnist_train = mnist_train.shuffle(info.splits['train'].num_examples)\n",
171 | "mnist_train = mnist_train.prefetch(tf.data.experimental.AUTOTUNE)\n",
172 | "\n",
173 | "\n",
174 | "mnist_test = mnist_test.map(\n",
175 | " normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n",
176 | "mnist_test = mnist_test.cache()\n",
177 | "mnist_test = mnist_test.prefetch(tf.data.experimental.AUTOTUNE)"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {
183 | "colab_type": "text",
184 | "id": "hOFsrnUTRzaB"
185 | },
186 | "source": [
187 | "We are now ready to apply a mirrored strategy. The goal of this strategy is to replicate the model across all GPUs on the same machine.\n",
188 | "Each model is trained on different batches of data and a synchronous training strategy is applied. "
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": null,
194 | "metadata": {
195 | "colab": {
196 | "base_uri": "https://localhost:8080/",
197 | "height": 89
198 | },
199 | "colab_type": "code",
200 | "executionInfo": {
201 | "elapsed": 9794,
202 | "status": "ok",
203 | "timestamp": 1601104210615,
204 | "user": {
205 | "displayName": "Alexia Audevart",
206 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
207 | "userId": "09723170522858788865"
208 | },
209 | "user_tz": -120
210 | },
211 | "id": "pBm-OtpARzaB",
212 | "outputId": "038fa280-6603-46dd-bdfa-0c5a5e140b23"
213 | },
214 | "outputs": [],
215 | "source": [
216 | "mirrored_strategy = tf.distribute.MirroredStrategy()"
217 | ]
218 | },
219 | {
220 | "cell_type": "markdown",
221 | "metadata": {
222 | "colab_type": "text",
223 | "id": "qixn64lUq60_"
224 | },
225 | "source": [
226 | "We check that we have two devices corresponding to the two virtual GPUs created at the beginning of this recipe."
227 | ]
228 | },
229 | {
230 | "cell_type": "code",
231 | "execution_count": null,
232 | "metadata": {
233 | "colab": {
234 | "base_uri": "https://localhost:8080/",
235 | "height": 35
236 | },
237 | "colab_type": "code",
238 | "executionInfo": {
239 | "elapsed": 9787,
240 | "status": "ok",
241 | "timestamp": 1601104210615,
242 | "user": {
243 | "displayName": "Alexia Audevart",
244 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
245 | "userId": "09723170522858788865"
246 | },
247 | "user_tz": -120
248 | },
249 | "id": "H_hL_Nh-R7ys",
250 | "outputId": "f688a8dd-46e3-40e8-a20f-ebe115fa298d"
251 | },
252 | "outputs": [],
253 | "source": [
254 | "print('Number of devices: {}'.format(mirrored_strategy.num_replicas_in_sync))"
255 | ]
256 | },
257 | {
258 | "cell_type": "markdown",
259 | "metadata": {
260 | "colab_type": "text",
261 | "id": "V58jdE6PrkPa"
262 | },
263 | "source": [
264 | "We'll define the batch size"
265 | ]
266 | },
267 | {
268 | "cell_type": "code",
269 | "execution_count": null,
270 | "metadata": {
271 | "colab": {},
272 | "colab_type": "code",
273 | "executionInfo": {
274 | "elapsed": 10087,
275 | "status": "ok",
276 | "timestamp": 1601104210917,
277 | "user": {
278 | "displayName": "Alexia Audevart",
279 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
280 | "userId": "09723170522858788865"
281 | },
282 | "user_tz": -120
283 | },
284 | "id": "4AitsRrCWFGI"
285 | },
286 | "outputs": [],
287 | "source": [
288 | "BATCH_SIZE_PER_REPLICA = 128\n",
289 | "BATCH_SIZE = BATCH_SIZE_PER_REPLICA * mirrored_strategy.num_replicas_in_sync\n",
290 | "\n",
291 | "mnist_train = mnist_train.batch(BATCH_SIZE)\n",
292 | "mnist_test = mnist_test.batch(BATCH_SIZE)"
293 | ]
294 | },
295 | {
296 | "cell_type": "markdown",
297 | "metadata": {
298 | "colab_type": "text",
299 | "id": "9kSzsZ44sCAH"
300 | },
301 | "source": [
302 | "We'll define and compile our model using the mirrored strategy.\n"
303 | ]
304 | },
305 | {
306 | "cell_type": "code",
307 | "execution_count": null,
308 | "metadata": {
309 | "colab": {},
310 | "colab_type": "code",
311 | "executionInfo": {
312 | "elapsed": 10085,
313 | "status": "ok",
314 | "timestamp": 1601104210918,
315 | "user": {
316 | "displayName": "Alexia Audevart",
317 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
318 | "userId": "09723170522858788865"
319 | },
320 | "user_tz": -120
321 | },
322 | "id": "iHg3UbiwRzaG"
323 | },
324 | "outputs": [],
325 | "source": [
326 | "with mirrored_strategy.scope():\n",
327 | " model = tf.keras.Sequential()\n",
328 | " model.add(tf.keras.layers.Flatten(name=\"FLATTEN\"))\n",
329 | " model.add(tf.keras.layers.Dense(units=128 , activation=\"relu\", name=\"D1\"))\n",
330 | " model.add(tf.keras.layers.Dense(units=64 , activation=\"relu\", name=\"D2\"))\n",
331 | " model.add(tf.keras.layers.Dense(units=10, activation=\"softmax\", name=\"OUTPUT\"))\n",
332 | " \n",
333 | " model.compile(\n",
334 | " optimizer=\"sgd\", \n",
335 | " loss=\"sparse_categorical_crossentropy\",\n",
336 | " metrics=[\"accuracy\"]\n",
337 | " )\n"
338 | ]
339 | },
340 | {
341 | "cell_type": "code",
342 | "execution_count": null,
343 | "metadata": {
344 | "colab": {
345 | "base_uri": "https://localhost:8080/",
346 | "height": 1000
347 | },
348 | "colab_type": "code",
349 | "id": "e1yVd5BrTSPO",
350 | "outputId": "c79aef2b-8daf-426a-b265-585fd4d79328"
351 | },
352 | "outputs": [],
353 | "source": [
354 | "model.fit(mnist_train, \n",
355 | " epochs=10,\n",
356 | " validation_data= mnist_test\n",
357 | " )"
358 | ]
359 | }
360 | ],
361 | "metadata": {
362 | "accelerator": "GPU",
363 | "colab": {
364 | "name": "05_parallelizing_tensorflow.ipynb",
365 | "provenance": []
366 | },
367 | "kernelspec": {
368 | "display_name": "Python 3",
369 | "language": "python",
370 | "name": "python3"
371 | },
372 | "language_info": {
373 | "codemirror_mode": {
374 | "name": "ipython",
375 | "version": 3
376 | },
377 | "file_extension": ".py",
378 | "mimetype": "text/x-python",
379 | "name": "python",
380 | "nbconvert_exporter": "python",
381 | "pygments_lexer": "ipython3",
382 | "version": "3.7.4"
383 | },
384 | "widgets": {
385 | "application/vnd.jupyter.widget-state+json": {
386 | "23b97c9cd41e4b92b571c7a3547e373c": {
387 | "model_module": "@jupyter-widgets/controls",
388 | "model_name": "HBoxModel",
389 | "state": {
390 | "_dom_classes": [],
391 | "_model_module": "@jupyter-widgets/controls",
392 | "_model_module_version": "1.5.0",
393 | "_model_name": "HBoxModel",
394 | "_view_count": null,
395 | "_view_module": "@jupyter-widgets/controls",
396 | "_view_module_version": "1.5.0",
397 | "_view_name": "HBoxView",
398 | "box_style": "",
399 | "children": [
400 | "IPY_MODEL_b531948290e0427d8249c410ec8a2db2",
401 | "IPY_MODEL_52238e0518364cf2adf113bb24a91e68"
402 | ],
403 | "layout": "IPY_MODEL_f6e0f1091af043548de0b55203aa4ac5"
404 | }
405 | },
406 | "42294c4dee554001a5878572d85f7343": {
407 | "model_module": "@jupyter-widgets/controls",
408 | "model_name": "ProgressStyleModel",
409 | "state": {
410 | "_model_module": "@jupyter-widgets/controls",
411 | "_model_module_version": "1.5.0",
412 | "_model_name": "ProgressStyleModel",
413 | "_view_count": null,
414 | "_view_module": "@jupyter-widgets/base",
415 | "_view_module_version": "1.2.0",
416 | "_view_name": "StyleView",
417 | "bar_color": null,
418 | "description_width": "initial"
419 | }
420 | },
421 | "52238e0518364cf2adf113bb24a91e68": {
422 | "model_module": "@jupyter-widgets/controls",
423 | "model_name": "HTMLModel",
424 | "state": {
425 | "_dom_classes": [],
426 | "_model_module": "@jupyter-widgets/controls",
427 | "_model_module_version": "1.5.0",
428 | "_model_name": "HTMLModel",
429 | "_view_count": null,
430 | "_view_module": "@jupyter-widgets/controls",
431 | "_view_module_version": "1.5.0",
432 | "_view_name": "HTMLView",
433 | "description": "",
434 | "description_tooltip": null,
435 | "layout": "IPY_MODEL_5e57b7c347b74fd097924786f170b3df",
436 | "placeholder": "",
437 | "style": "IPY_MODEL_d4982937522d47508a0055971927d075",
438 | "value": " 4/4 [00:04<00:00, 1.21s/ file]"
439 | }
440 | },
441 | "5e57b7c347b74fd097924786f170b3df": {
442 | "model_module": "@jupyter-widgets/base",
443 | "model_name": "LayoutModel",
444 | "state": {
445 | "_model_module": "@jupyter-widgets/base",
446 | "_model_module_version": "1.2.0",
447 | "_model_name": "LayoutModel",
448 | "_view_count": null,
449 | "_view_module": "@jupyter-widgets/base",
450 | "_view_module_version": "1.2.0",
451 | "_view_name": "LayoutView",
452 | "align_content": null,
453 | "align_items": null,
454 | "align_self": null,
455 | "border": null,
456 | "bottom": null,
457 | "display": null,
458 | "flex": null,
459 | "flex_flow": null,
460 | "grid_area": null,
461 | "grid_auto_columns": null,
462 | "grid_auto_flow": null,
463 | "grid_auto_rows": null,
464 | "grid_column": null,
465 | "grid_gap": null,
466 | "grid_row": null,
467 | "grid_template_areas": null,
468 | "grid_template_columns": null,
469 | "grid_template_rows": null,
470 | "height": null,
471 | "justify_content": null,
472 | "justify_items": null,
473 | "left": null,
474 | "margin": null,
475 | "max_height": null,
476 | "max_width": null,
477 | "min_height": null,
478 | "min_width": null,
479 | "object_fit": null,
480 | "object_position": null,
481 | "order": null,
482 | "overflow": null,
483 | "overflow_x": null,
484 | "overflow_y": null,
485 | "padding": null,
486 | "right": null,
487 | "top": null,
488 | "visibility": null,
489 | "width": null
490 | }
491 | },
492 | "6ca3b4d177084271aab1437bd4d88eca": {
493 | "model_module": "@jupyter-widgets/base",
494 | "model_name": "LayoutModel",
495 | "state": {
496 | "_model_module": "@jupyter-widgets/base",
497 | "_model_module_version": "1.2.0",
498 | "_model_name": "LayoutModel",
499 | "_view_count": null,
500 | "_view_module": "@jupyter-widgets/base",
501 | "_view_module_version": "1.2.0",
502 | "_view_name": "LayoutView",
503 | "align_content": null,
504 | "align_items": null,
505 | "align_self": null,
506 | "border": null,
507 | "bottom": null,
508 | "display": null,
509 | "flex": null,
510 | "flex_flow": null,
511 | "grid_area": null,
512 | "grid_auto_columns": null,
513 | "grid_auto_flow": null,
514 | "grid_auto_rows": null,
515 | "grid_column": null,
516 | "grid_gap": null,
517 | "grid_row": null,
518 | "grid_template_areas": null,
519 | "grid_template_columns": null,
520 | "grid_template_rows": null,
521 | "height": null,
522 | "justify_content": null,
523 | "justify_items": null,
524 | "left": null,
525 | "margin": null,
526 | "max_height": null,
527 | "max_width": null,
528 | "min_height": null,
529 | "min_width": null,
530 | "object_fit": null,
531 | "object_position": null,
532 | "order": null,
533 | "overflow": null,
534 | "overflow_x": null,
535 | "overflow_y": null,
536 | "padding": null,
537 | "right": null,
538 | "top": null,
539 | "visibility": null,
540 | "width": null
541 | }
542 | },
543 | "b531948290e0427d8249c410ec8a2db2": {
544 | "model_module": "@jupyter-widgets/controls",
545 | "model_name": "FloatProgressModel",
546 | "state": {
547 | "_dom_classes": [],
548 | "_model_module": "@jupyter-widgets/controls",
549 | "_model_module_version": "1.5.0",
550 | "_model_name": "FloatProgressModel",
551 | "_view_count": null,
552 | "_view_module": "@jupyter-widgets/controls",
553 | "_view_module_version": "1.5.0",
554 | "_view_name": "ProgressView",
555 | "bar_style": "success",
556 | "description": "Dl Completed...: 100%",
557 | "description_tooltip": null,
558 | "layout": "IPY_MODEL_6ca3b4d177084271aab1437bd4d88eca",
559 | "max": 4,
560 | "min": 0,
561 | "orientation": "horizontal",
562 | "style": "IPY_MODEL_42294c4dee554001a5878572d85f7343",
563 | "value": 4
564 | }
565 | },
566 | "d4982937522d47508a0055971927d075": {
567 | "model_module": "@jupyter-widgets/controls",
568 | "model_name": "DescriptionStyleModel",
569 | "state": {
570 | "_model_module": "@jupyter-widgets/controls",
571 | "_model_module_version": "1.5.0",
572 | "_model_name": "DescriptionStyleModel",
573 | "_view_count": null,
574 | "_view_module": "@jupyter-widgets/base",
575 | "_view_module_version": "1.2.0",
576 | "_view_name": "StyleView",
577 | "description_width": ""
578 | }
579 | },
580 | "f6e0f1091af043548de0b55203aa4ac5": {
581 | "model_module": "@jupyter-widgets/base",
582 | "model_name": "LayoutModel",
583 | "state": {
584 | "_model_module": "@jupyter-widgets/base",
585 | "_model_module_version": "1.2.0",
586 | "_model_name": "LayoutModel",
587 | "_view_count": null,
588 | "_view_module": "@jupyter-widgets/base",
589 | "_view_module_version": "1.2.0",
590 | "_view_name": "LayoutView",
591 | "align_content": null,
592 | "align_items": null,
593 | "align_self": null,
594 | "border": null,
595 | "bottom": null,
596 | "display": null,
597 | "flex": null,
598 | "flex_flow": null,
599 | "grid_area": null,
600 | "grid_auto_columns": null,
601 | "grid_auto_flow": null,
602 | "grid_auto_rows": null,
603 | "grid_column": null,
604 | "grid_gap": null,
605 | "grid_row": null,
606 | "grid_template_areas": null,
607 | "grid_template_columns": null,
608 | "grid_template_rows": null,
609 | "height": null,
610 | "justify_content": null,
611 | "justify_items": null,
612 | "left": null,
613 | "margin": null,
614 | "max_height": null,
615 | "max_width": null,
616 | "min_height": null,
617 | "min_width": null,
618 | "object_fit": null,
619 | "object_position": null,
620 | "order": null,
621 | "overflow": null,
622 | "overflow_x": null,
623 | "overflow_y": null,
624 | "padding": null,
625 | "right": null,
626 | "top": null,
627 | "visibility": null,
628 | "width": null
629 | }
630 | }
631 | }
632 | }
633 | },
634 | "nbformat": 4,
635 | "nbformat_minor": 4
636 | }
637 |
--------------------------------------------------------------------------------
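
For readers without a physical GPU, the recipe in the notebook above can be exercised end to end with the same pattern on CPU: `MirroredStrategy` simply falls back to a single replica. The sketch below is not part of the notebook; the synthetic tensors are placeholders chosen only to keep it self-contained, and it assumes TensorFlow 2.x:

```python
import tensorflow as tf

# Sketch: same structure as the recipe, runnable even on a CPU-only machine
# (MirroredStrategy then reports a single replica).
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

# Model and optimizer creation go inside the scope so that their variables
# are mirrored on every replica.
with strategy.scope():
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28, 1), name="FLATTEN"),
        tf.keras.layers.Dense(128, activation="relu", name="D1"),
        tf.keras.layers.Dense(64, activation="relu", name="D2"),
        tf.keras.layers.Dense(10, activation="softmax", name="OUTPUT"),
    ])
    model.compile(optimizer="sgd",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])

# The global batch is split across the replicas, so it is the per-replica
# size multiplied by the number of replicas in sync.
BATCH_SIZE = 128 * strategy.num_replicas_in_sync

# Placeholder tensors standing in for the tfds MNIST pipeline of the recipe.
x = tf.random.uniform((512, 28, 28, 1))
y = tf.random.uniform((512,), maxval=10, dtype=tf.int32)
model.fit(x, y, batch_size=BATCH_SIZE, epochs=1)
```

Only variable creation (model weights, optimizer slots) needs to happen inside `strategy.scope()`; `model.fit` can be called outside it and will distribute each batch across the replicas.
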
/ch6/08_Learning_Tic_Tac_Toe/TicTacToe.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "-FirtvTa3Mzu"
7 | },
8 | "source": [
9 | "# Learning Optimal Tic-Tac-Toe Moves via a Neural Network\n",
10 | "-------------------------------------------------------\n",
11 | "We will build a one-hidden layer neural network\n",
12 | " to predict the optimal response given a set\n",
13 | " of tic-tac-toe boards."
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": null,
19 | "metadata": {
20 | "executionInfo": {
21 | "elapsed": 3246,
22 | "status": "ok",
23 | "timestamp": 1601114572980,
24 | "user": {
25 | "displayName": "Alexia Audevart",
26 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggwako-F982wqMhMKBGTSz7xNQvuu4pmC79syucvA=s64",
27 | "userId": "09723170522858788865"
28 | },
29 | "user_tz": -120
30 | },
31 | "id": "nOWWj2Px3Mzv"
32 | },
33 | "outputs": [],
34 | "source": [
35 | "import tensorflow as tf\n",
36 | "#import matplotlib.pyplot as plt\n",
37 | "import csv\n",
38 | "import numpy as np\n",
39 | "import random"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {
46 | "id": "O1JPvr5t3Mzy"
47 | },
48 | "outputs": [],
49 | "source": [
50 | "# Definition of X's, O's, and empty spots:\n",
51 | "# X = 1\n",
52 | "# O = -1\n",
53 | "# empty = 0\n",
54 | "# response on 1-9 grid for placement of next '1'\n",
55 | "\n",
56 | "# For example, the 'test_board' is:\n",
57 | "#\n",
58 | "# O | - | -\n",
59 | "# -----------------\n",
60 | "# X | O | O\n",
61 | "# -----------------\n",
62 | "# - | - | X\n",
63 | "#\n",
64 | "# board above = [-1, 0, 0, 1, -1, -1, 0, 0, 1]\n",
65 | "# Optimal response would be position 6, where\n",
66 | "# the position numbers are:\n",
67 | "#\n",
68 | "# 0 | 1 | 2\n",
69 | "# -----------------\n",
70 | "# 3 | 4 | 5\n",
71 | "# -----------------\n",
72 | "# 6 | 7 | 8\n",
73 | "\n",
74 | "\n",
75 | "# Test board optimal response:\n",
76 | "#response = 6\n",
77 | "# Set batch size and five different symmetries of board positions\n",
78 | "batch_size = 50"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": null,
84 | "metadata": {
85 | "id": "mOc2vBh93Mz1"
86 | },
87 | "outputs": [],
88 | "source": [
89 | "# Print a board\n",
90 | "def print_board(board):\n",
91 | " symbols = ['O', ' ', 'X']\n",
92 | " board_plus1 = [int(x) + 1 for x in board]\n",
93 | " board_line1 = ' {} | {} | {}'.format(symbols[board_plus1[0]],\n",
94 | " symbols[board_plus1[1]],\n",
95 | " symbols[board_plus1[2]])\n",
96 | " board_line2 = ' {} | {} | {}'.format(symbols[board_plus1[3]],\n",
97 | " symbols[board_plus1[4]],\n",
98 | " symbols[board_plus1[5]])\n",
99 | " board_line3 = ' {} | {} | {}'.format(symbols[board_plus1[6]],\n",
100 | " symbols[board_plus1[7]],\n",
101 | " symbols[board_plus1[8]])\n",
102 | " print(board_line1)\n",
103 | " print('___________')\n",
104 | " print(board_line2)\n",
105 | " print('___________')\n",
106 | " print(board_line3)"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "metadata": {
113 | "id": "nlxJc_D33Mz3"
114 | },
115 | "outputs": [],
116 | "source": [
117 | "# Data Augmentation\n",
118 | "symmetry = ['rotate180', 'rotate90', 'rotate270', 'flip_v', 'flip_h']\n",
119 | "\n",
120 | "# Given a board, a response, and a transformation, get the new board+response\n",
121 | "def get_symmetry(board, play_response, transformation):\n",
122 | " \"\"\"\n",
123 | " :param board: list of integers 9 long:\n",
124 | " opposing mark = -1\n",
125 | " friendly mark = 1\n",
126 | " empty space = 0\n",
127 | " :param play_response: integer of where response is (0-8)\n",
128 | " :param transformation: one of five transformations on a board:\n",
129 | " 'rotate180', 'rotate90', 'rotate270', 'flip_v', 'flip_h'\n",
130 | " :return: tuple: (new_board, new_response)\n",
131 | " \"\"\"\n",
132 | " if transformation == 'rotate180':\n",
133 | " new_response = 8 - play_response\n",
134 | " return board[::-1], new_response\n",
135 | " elif transformation == 'rotate90':\n",
136 | " new_response = [6, 3, 0, 7, 4, 1, 8, 5, 2].index(play_response)\n",
137 | " tuple_board = list(zip(*[board[6:9], board[3:6], board[0:3]]))\n",
138 | " return [value for item in tuple_board for value in item], new_response\n",
139 | " elif transformation == 'rotate270':\n",
140 | " new_response = [2, 5, 8, 1, 4, 7, 0, 3, 6].index(play_response)\n",
141 | " tuple_board = list(zip(*[board[0:3], board[3:6], board[6:9]]))[::-1]\n",
142 | " return [value for item in tuple_board for value in item], new_response\n",
143 | " elif transformation == 'flip_v':\n",
144 | " new_response = [6, 7, 8, 3, 4, 5, 0, 1, 2].index(play_response)\n",
145 | " return board[6:9] + board[3:6] + board[0:3], new_response\n",
146 | " elif transformation == 'flip_h': # flip_h = rotate180, then flip_v\n",
147 | " new_response = [2, 1, 0, 5, 4, 3, 8, 7, 6].index(play_response)\n",
148 | " new_board = board[::-1]\n",
149 | " return new_board[6:9] + new_board[3:6] + new_board[0:3], new_response\n",
150 | " else:\n",
151 | " raise ValueError('Method not implemented.')\n",
152 | "\n",
153 | "\n",
154 | "# Read in board move csv file\n",
155 | "def get_moves_from_csv(csv_file):\n",
156 | " \"\"\"\n",
157 | " :param csv_file: csv file location containing the boards w/ responses\n",
158 | " :return: moves: list of moves with index of best response\n",
159 | " \"\"\"\n",
160 | " play_moves = []\n",
161 | " with open(csv_file, 'rt') as csvfile:\n",
162 | " reader = csv.reader(csvfile, delimiter=',')\n",
163 | " for row in reader:\n",
164 | " play_moves.append(([int(x) for x in row[0:9]], int(row[9])))\n",
165 | " return play_moves\n",
166 | "\n",
167 | "\n",
168 | "# Get random board with optimal move\n",
169 | "def get_rand_move(play_moves, rand_transforms=2):\n",
170 | " \"\"\"\n",
171 | " :param play_moves: list of the boards w/responses\n",
172 | " :param rand_transforms: how many random transforms performed on each\n",
173 | " :return: (board, response), board is a list of 9 integers, response is 1 int\n",
174 | " \"\"\"\n",
175 | " (board, play_response) = random.choice(play_moves)\n",
176 | " possible_transforms = ['rotate90', 'rotate180', 'rotate270', 'flip_v', 'flip_h']\n",
177 | " for _ in range(rand_transforms):\n",
178 | " random_transform = random.choice(possible_transforms)\n",
179 | " (board, play_response) = get_symmetry(board, play_response, random_transform)\n",
180 | " return board, play_response\n",
181 | "\n",
182 | "# Get list of optimal moves w/ responses\n",
183 | "moves = get_moves_from_csv('base_tic_tac_toe_moves.csv')"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "metadata": {
190 | "id": "YGN4sMEK3Mz5",
191 | "outputId": "a6d78554-4f57-4675-be0e-3a5fd2df10cf"
192 | },
193 | "outputs": [
194 | {
195 | "name": "stdout",
196 | "output_type": "stream",
197 | "text": [
198 | "WARNING:tensorflow:\n",
199 | "The following Variables were used a Lambda layer's call (lambda_4), but\n",
200 | "are not present in its tracked objects:\n",
201 | " \n",
202 | " \n",
203 | "It is possible that this is intended behavior, but it is more likely\n",
204 | "an omission. This is a strong indication that this layer should be\n",
205 | "formulated as a subclassed Layer rather than a Lambda layer.\n",
206 | "WARNING:tensorflow:\n",
207 | "The following Variables were used a Lambda layer's call (lambda_5), but\n",
208 | "are not present in its tracked objects:\n",
209 | " \n",
210 | " \n",
211 | "It is possible that this is intended behavior, but it is more likely\n",
212 | "an omission. This is a strong indication that this layer should be\n",
213 | "formulated as a subclassed Layer rather than a Lambda layer.\n",
214 | "Iteration: 0, Loss: 8.754105567932129\n",
215 | "Iteration: 500, Loss: 2.0992887020111084\n",
216 | "Iteration: 1000, Loss: 1.630592703819275\n",
217 | "Iteration: 1500, Loss: 1.5122987031936646\n",
218 | "Iteration: 2000, Loss: 1.3421776294708252\n",
219 | "Iteration: 2500, Loss: 1.0516871213912964\n",
220 | "Iteration: 3000, Loss: 1.386433720588684\n",
221 | "Iteration: 3500, Loss: 1.2294689416885376\n",
222 | "Iteration: 4000, Loss: 1.0963019132614136\n",
223 | "Iteration: 4500, Loss: 1.2741808891296387\n",
224 | "Iteration: 5000, Loss: 0.7892175912857056\n",
225 | "Iteration: 5500, Loss: 0.9967914819717407\n",
226 | "Iteration: 6000, Loss: 0.9536704421043396\n",
227 | "Iteration: 6500, Loss: 1.1726771593093872\n",
228 | "Iteration: 7000, Loss: 0.8187830448150635\n",
229 | "Iteration: 7500, Loss: 0.8756659030914307\n",
230 | "Iteration: 8000, Loss: 0.883604109287262\n",
231 | "Iteration: 8500, Loss: 0.7775364518165588\n",
232 | "Iteration: 9000, Loss: 0.7878325581550598\n",
233 | "Iteration: 9500, Loss: 0.5939719676971436\n"
234 | ]
235 | }
236 | ],
237 | "source": [
238 | "# Create a train set:\n",
239 | "train_length = 500\n",
240 | "train_set = []\n",
241 | "for t in range(train_length):\n",
242 | " train_set.append(get_rand_move(moves))\n",
243 | "\n",
244 | "# To see if the network learns anything new, we will remove\n",
245 | "# all instances of the board [-1, 0, 0, 1, -1, -1, 0, 0, 1],\n",
246 | "# which the optimal response will be the index '6'. We will\n",
247 | "# Test this at the end.\n",
248 | "test_board = [-1, 0, 0, 1, -1, -1, 0, 0, 1]\n",
249 | "train_set = [x for x in train_set if x[0] != test_board]\n",
250 | "\n",
251 | "def init_weights(shape):\n",
252 | " return tf.Variable(tf.random.normal(shape))\n",
253 | "\n",
254 | "A1 = init_weights([9, 81])\n",
255 | "bias1 = init_weights([81])\n",
256 | "A2 = init_weights([81, 9])\n",
257 | "bias2 = init_weights([9])\n",
258 | "\n",
259 | "# Initialize input data\n",
260 | "X = tf.keras.Input(dtype=tf.float32, batch_input_shape=[None, 9])\n",
261 | "hidden_output = tf.keras.layers.Lambda(lambda x: tf.nn.sigmoid(tf.add(tf.matmul(x, A1), bias1)))(X)\n",
262 | "# Note: we don't take the softmax at the end because our cost function does that for us\n",
263 | "final_output = tf.keras.layers.Lambda(lambda x: tf.add(tf.matmul(x, A2), bias2))(hidden_output)\n",
264 | "model = tf.keras.Model(inputs=X, outputs=final_output, name=\"tic_tac_toe_neural_network\")\n",
265 | "\n",
266 | "optimizer = tf.keras.optimizers.SGD(0.025)\n",
267 | "\n",
268 | "loss_vec = []\n",
269 | "for i in range(10000):\n",
270 | " rand_indices = np.random.choice(range(len(train_set)), batch_size, replace=False)\n",
271 | " batch_data = [train_set[i] for i in rand_indices]\n",
272 | " x_input = [x[0] for x in batch_data]\n",
273 | " y_target = np.array([y[1] for y in batch_data])\n",
274 | "\n",
275 | " # Open a GradientTape.\n",
276 | " with tf.GradientTape(persistent=True) as tape:\n",
277 | "\n",
278 | " # Forward pass.\n",
279 | " output = model(np.array(x_input, dtype=float))\n",
280 | "\n",
281 | " # Apply loss function (Cross Entropy loss)\n",
282 | " loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=y_target))\n",
283 | " loss_vec.append(loss)\n",
284 | "\n",
285 | " # Get gradients of loss with reference to the weights and bias variables to adjust.\n",
286 | " gradients_A1 = tape.gradient(loss, A1)\n",
287 | " gradients_b1 = tape.gradient(loss, bias1)\n",
288 | " gradients_A2 = tape.gradient(loss, A2)\n",
289 | " gradients_b2 = tape.gradient(loss, bias2)\n",
290 | "\n",
291 | " # Update the weights and bias variables of the model.\n",
292 | " optimizer.apply_gradients(zip([gradients_A1, gradients_b1, gradients_A2, gradients_b2],\n",
293 | " [A1, bias1, A2, bias2]))\n",
294 | "\n",
295 | " if i % 500 == 0:\n",
296 | " print('Iteration: {}, Loss: {}'.format(i, loss))"
297 | ]
298 | },
299 | {
300 | "cell_type": "code",
301 | "execution_count": null,
302 | "metadata": {
303 | "id": "H6AdlBe53Mz7",
304 | "outputId": "c5002951-07c3-4d6a-9645-6711f2e6e7a2"
305 | },
306 | "outputs": [
307 | {
308 | "ename": "NameError",
309 | "evalue": "name 'plt' is not defined",
310 | "output_type": "error",
311 | "traceback": [
312 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
313 | "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
314 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# Print loss\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_vec\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'k-'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlabel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'Loss'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtitle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Loss (MSE) per Generation'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mxlabel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Generation'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mylabel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Loss'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
315 | "\u001b[0;31mNameError\u001b[0m: name 'plt' is not defined"
316 | ]
317 | }
318 | ],
319 | "source": [
320 | "# Print loss\n",
321 | "plt.plot(loss_vec, 'k-', label='Loss')\n",
322 | "plt.title('Loss (MSE) per Generation')\n",
323 | "plt.xlabel('Generation')\n",
324 | "plt.ylabel('Loss')\n",
325 | "plt.show()"
326 | ]
327 | },
328 | {
329 | "cell_type": "code",
330 | "execution_count": null,
331 | "metadata": {
332 | "id": "jCNleWTf3Mz9",
333 | "outputId": "50872774-efb2-491a-e793-b80768c1dcbf"
334 | },
335 | "outputs": [
336 | {
337 | "name": "stdout",
338 | "output_type": "stream",
339 | "text": [
340 | "tf.Tensor([8], shape=(1,), dtype=int64)\n"
341 | ]
342 | }
343 | ],
344 | "source": [
345 | "# Make Prediction:\n",
346 | "test_boards = [test_board]\n",
347 | "logits = model.predict(test_boards)\n",
348 | "predictions = tf.argmax(logits, 1)\n",
349 | "print(predictions)"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "execution_count": null,
355 | "metadata": {
356 | "id": "wQVOIXcD3Mz_",
357 | "outputId": "368b385a-39d4-4e40-bb6e-7021469d6aae"
358 | },
359 | "outputs": [
360 | {
361 | "name": "stdin",
362 | "output_type": "stream",
363 | "text": [
364 | "Input index of your move (0-8): 0\n"
365 | ]
366 | },
367 | {
368 | "name": "stdout",
369 | "output_type": "stream",
370 | "text": [
371 | "Model has moved\n",
372 | " X | | \n",
373 | "___________\n",
374 | " | | \n",
375 | "___________\n",
376 | " O | | \n"
377 | ]
378 | },
379 | {
380 | "name": "stdin",
381 | "output_type": "stream",
382 | "text": [
383 | "Input index of your move (0-8): 3\n"
384 | ]
385 | },
386 | {
387 | "name": "stdout",
388 | "output_type": "stream",
389 | "text": [
390 | "Model has moved\n",
391 | " X | | O\n",
392 | "___________\n",
393 | " X | | \n",
394 | "___________\n",
395 | " O | | \n"
396 | ]
397 | },
398 | {
399 | "name": "stdin",
400 | "output_type": "stream",
401 | "text": [
402 | "Input index of your move (0-8): 1\n"
403 | ]
404 | },
405 | {
406 | "name": "stdout",
407 | "output_type": "stream",
408 | "text": [
409 | "Model has moved\n",
410 | " X | X | O\n",
411 | "___________\n",
412 | " X | | \n",
413 | "___________\n",
414 | " O | | O\n"
415 | ]
416 | },
417 | {
418 | "name": "stdin",
419 | "output_type": "stream",
420 | "text": [
421 | "Input index of your move (0-8): 5\n"
422 | ]
423 | },
424 | {
425 | "name": "stdout",
426 | "output_type": "stream",
427 | "text": [
428 | "Model has moved\n",
429 | " X | X | O\n",
430 | "___________\n",
431 | " X | O | X\n",
432 | "___________\n",
433 | " O | | O\n",
434 | "Game Over!\n"
435 | ]
436 | }
437 | ],
438 | "source": [
439 | "# Declare function to check for win\n",
440 | "def check(board):\n",
441 | " wins = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]\n",
442 | " for ix in range(len(wins)):\n",
443 | " if board[wins[ix][0]] == board[wins[ix][1]] == board[wins[ix][2]] == 1.:\n",
444 | " return 1\n",
445 | " elif board[wins[ix][0]] == board[wins[ix][1]] == board[wins[ix][2]] == -1.:\n",
446 | " return -1\n",
447 | " return 0\n",
448 | "\n",
449 | "# Let's play against our model\n",
450 | "game_tracker = [0., 0., 0., 0., 0., 0., 0., 0., 0.]\n",
451 | "win_logical = False\n",
452 | "num_moves = 0\n",
453 | "while not win_logical:\n",
454 | " player_index = input('Input index of your move (0-8): ')\n",
455 | " num_moves += 1\n",
456 | " # Add player move to game\n",
457 | " game_tracker[int(player_index)] = 1.\n",
458 | " \n",
459 | " # Get model's move by first getting all the logits for each index\n",
460 | " [potential_moves] = model(np.array([game_tracker], dtype=float))\n",
461 | " # Now find allowed moves (where game tracker values = 0.0)\n",
462 | " allowed_moves = [ix for ix, x in enumerate(game_tracker) if x == 0.0]\n",
463 | " # Find best move by taking argmax of logits if they are in allowed moves\n",
464 | " model_move = np.argmax([x if ix in allowed_moves else -999.0 for ix, x in enumerate(potential_moves)])\n",
465 | " \n",
466 | " # Add model move to game\n",
467 | " game_tracker[int(model_move)] = -1.\n",
468 | " print('Model has moved')\n",
469 | " print_board(game_tracker)\n",
470 | " # Now check for win or too many moves\n",
471 | " if check(game_tracker) == -1 or num_moves >= 5:\n",
472 | " print('Game Over!')\n",
473 | " win_logical = True\n",
474 | " elif check(game_tracker) == 1:\n",
475 | " print('Congratulations, You won!')\n",
476 | " win_logical = True\n"
477 | ]
478 | },
479 | {
480 | "cell_type": "code",
481 | "execution_count": null,
482 | "metadata": {
483 | "id": "Q1gSBmxG3M0B"
484 | },
485 | "outputs": [],
486 | "source": []
487 | }
488 | ],
489 | "metadata": {
490 | "colab": {
491 | "name": "TicTacToe.ipynb",
492 | "provenance": []
493 | },
494 | "kernelspec": {
495 | "display_name": "tf2",
496 | "language": "python",
497 | "name": "tf2"
498 | },
499 | "language_info": {
500 | "codemirror_mode": {
501 | "name": "ipython",
502 | "version": 3
503 | },
504 | "file_extension": ".py",
505 | "mimetype": "text/x-python",
506 | "name": "python",
507 | "nbconvert_exporter": "python",
508 | "pygments_lexer": "ipython3",
509 | "version": "3.7.7"
510 | }
511 | },
512 | "nbformat": 4,
513 | "nbformat_minor": 4
514 | }
515 |
--------------------------------------------------------------------------------
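
As a quick sanity check of the symmetry-based data augmentation used in TicTacToe.ipynb above, here is a small, self-contained sketch (not part of the notebook; `rotate90` and `rotate90_response` are hypothetical NumPy helpers) that rotates a board 90 degrees clockwise, remaps the move index the same way, and verifies the two stay consistent on the documented test board:

```python
import numpy as np

def rotate90(board):
    """Rotate a flat 9-cell board (row-major 3x3) 90 degrees clockwise."""
    grid = np.array(board).reshape(3, 3)
    return np.rot90(grid, k=-1).flatten().tolist()  # k=-1 rotates clockwise

def rotate90_response(response):
    """Track where a single move index lands under the same rotation."""
    marker = [0] * 9
    marker[response] = 1
    return rotate90(marker).index(1)

# The test board documented in the notebook; its optimal response is index 6.
board = [-1, 0, 0, 1, -1, -1, 0, 0, 1]
response = 6

new_board = rotate90(board)
new_response = rotate90_response(response)

# The remapped move must point at the cell the original move landed on.
assert new_board[new_response] == board[response]
print(new_board, new_response)
```

Mapping every index 0-8 through `rotate90_response` reproduces the same permutation that `get_symmetry` realizes with `[6, 3, 0, 7, 4, 1, 8, 5, 2].index(play_response)` in its 'rotate90' branch, which is one way to convince yourself the hard-coded lookup tables are correct.
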