├── .dockerignore
├── .gitignore
├── Dockerfile
├── Dockerfile-TFv2
├── LICENSE.txt
├── README.md
├── TFv2
├── Device Stats.ipynb
├── README.md
├── ch02
│ ├── Exercise 2.01.ipynb
│ ├── Exercise 2.02.ipynb
│ ├── Exercise 2.03.ipynb
│ ├── Listing 2.03.ipynb
│ ├── Listing 2.04.ipynb
│ ├── Listing 2.05.ipynb
│ ├── Listing 2.06.ipynb
│ ├── Listing 2.07.ipynb
│ ├── Listing 2.08.ipynb
│ ├── Listing 2.09.ipynb
│ ├── Listing 2.10 - 2.11.ipynb
│ ├── Listing 2.12 - 2.16.ipynb
│ └── tf_ckpts
│ │ ├── checkpoint
│ │ ├── ckpt-1.data-00000-of-00001
│ │ ├── ckpt-1.index
│ │ ├── ckpt-2.data-00000-of-00001
│ │ ├── ckpt-2.index
│ │ ├── ckpt-3.data-00000-of-00001
│ │ └── ckpt-3.index
├── ch03
│ ├── Listing 3.01 - 3.02.ipynb
│ ├── Listing 3.03.ipynb
│ └── Listing 3.04 - 3.05.ipynb
├── ch04
│ └── Listing 4.01 - 4.06.ipynb
├── ch05
│ ├── Listing 5.01 - 5.04.ipynb
│ ├── Listing 5.05.ipynb
│ ├── Listing 5.06 - 5.07.ipynb
│ └── Listing 5.08 - 5.11.ipynb
├── ch06
│ └── Listing 6.01 - 6.17.ipynb
├── ch07
│ ├── Listing 7.01.ipynb
│ ├── Listing 7.02.ipynb
│ ├── Listing 7.03.ipynb
│ ├── Listing 7.04.ipynb
│ ├── Listing 7.05 - 7.06.ipynb
│ └── Listing 7.07 - 7.12.ipynb
├── ch08
│ └── Listing 8.01 - 8.08.ipynb
├── ch09
│ ├── Listing 9.01 - 9.06.ipynb
│ └── Listing 9.07 - 9.11.ipynb
├── ch10
│ └── Listing 10.01 - 10.30.ipynb
├── ch11
│ └── Listing 11.01 - 11.08.ipynb
├── ch12
│ ├── Listing 12.01 - 12.14.ipynb
│ └── Stacked Autoencoder MNIST.ipynb
├── ch13
│ ├── Listing 13.01 - 13.04.ipynb
│ ├── Listing 13.05 - 13.09.ipynb
│ └── Listing 13.10.ipynb
├── ch14
│ ├── Listing 14.01 - 14.10.ipynb
│ └── Listing 14.11 - 14.16.ipynb
├── ch15
│ ├── Listing 15.01 - 15.13.ipynb
│ ├── Listing 15.14 - 15.23.ipynb
│ ├── VGG Face Estimator.ipynb
│ └── vgg-face.ipynb
├── ch16
│ ├── Listing 16.01 - 16.06.ipynb
│ └── Listing 16.07 - 16.09.ipynb
├── ch17
│ ├── Listing 17.01 - 17.16.ipynb
│ └── TF Datasets and LibriSpeech.ipynb
├── ch18
│ ├── Listing 18-eoc-assign.ipynb
│ ├── Listing 18.01 - 18.04.ipynb
│ └── Listing 18.05 - 18.08.ipynb
└── ch19
│ ├── Listing 19.01 - 19.10.ipynb
│ └── Listing 19.11 - 19.20.ipynb
├── build_TFv2_environment.sh
├── build_environment.sh
├── ch02
├── Listing 2.01.ipynb
├── Listing 2.03.ipynb
├── Listing 2.04 - 2.06.ipynb
├── Listing 2.07 - 2.08.ipynb
├── Listing 2.09.ipynb
├── Listing 2.10.ipynb
├── Listing 2.11.ipynb
├── Listing 2.14.ipynb
└── Listing 2.16.ipynb
├── ch03
├── Listing 3.01.ipynb
├── Listing 3.02.ipynb
├── Listing 3.03.ipynb
└── Listing 3.04-3.05.ipynb
├── ch04
└── Listing 4.01-4.06.ipynb
├── ch05
├── Listing 5.01 - 5.04.ipynb
├── Listing 5.05.ipynb
├── Listing 5.06 - 5.07.ipynb
└── Listing 5.08 - 5.10.ipynb
├── ch06
└── Listing 6.01 - 6.17.ipynb
├── ch07
├── Listing 7.01.ipynb
├── Listing 7.02.ipynb
├── Listing 7.03.ipynb
├── Listing 7.04.ipynb
├── Listing 7.05-7.06.ipynb
└── Listing 7.07 - 7.12.ipynb
├── ch08
└── Listing 8.01 - 8.08.ipynb
├── ch09
├── Listing 9.01-9.06.ipynb
└── Listing 9.07-9.11.ipynb
├── ch10
└── Listing 10.01 - 10.30.ipynb
├── ch11
├── Listing 11.01-11.06.ipynb
└── Listing 11.07-11.08.ipynb
├── ch12
├── Listing 12.01 - 12.14.ipynb
└── Stacked Autoencoder MNIST.ipynb
├── ch13
├── Listing 13.01 - 13.04.ipynb
├── Listing 13.05 - 13.09.ipynb
└── Listing 13.10.ipynb
├── ch14
├── Listing 14.01-14.10.ipynb
└── Listing 14.11 - 14.16.ipynb
├── ch15
├── Listing 15.01 - 15.13.ipynb
├── Listing 15.14 - 15.23.ipynb
├── VGG Face Estimator.ipynb
├── train-hvd-vgg-ckpt-warmup.py
├── train-hvd-vgg.py
├── vgg-face.ipynb
├── vgg_create_imgs.py
└── vgg_valid_url.py
├── ch16
├── Listing 16.01 - 16.06.ipynb
└── Listing 16.07 - 16.09.ipynb
├── ch17
├── Listing 17.01 - 17.16.ipynb
└── TF Datasets and LibriSpeech.ipynb
├── ch18
├── Listing 18-eoc-assign.ipynb
├── Listing 18.01 - 18.04.ipynb
└── Listing 18.05 - 18.08.ipynb
├── ch19
├── Listing 19.01 - 19.10.ipynb
└── Listing 19.11 - 19.20.ipynb
├── data
└── delete.txt
├── download-data.sh
├── download-libs.sh
├── figs
├── Figures.graffle
├── Figures2.graffle
│ ├── data.plist
│ ├── image1.png
│ ├── image2.png
│ ├── image3.png
│ ├── image4.png
│ └── image5.png
├── android_pos_model.png
├── ch06-final-bow-model.png
├── ch10-apertium-p1.png
├── ch10-apertium-p2.png
├── ch12-autoencoder.png
├── ch13-network.png
├── ch13-q-function.png
├── ch15-cnn-arch.png
└── mltf2-cover.jpeg
├── mltf-entrypoint.sh
├── models
└── delete.txt
├── requirements-gpu-py2.txt
├── requirements-py2.txt
├── requirements-tf2.txt
├── requirements.txt
├── run_TFv2_environment.sh
└── run_environment.sh
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 | Dockerfile
3 | Pipfile
4 | run_environment.sh
5 | build_environment.sh
6 | libs
7 | models
8 | papers
9 | data
10 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | models/checkpoint
2 | libs
3 | data/seq2seq*
4 | data/*.csv
5 | data/cache
6 | data/vgg*
7 | data/tb_files*
8 | .ipynb_checkpoints
9 | aclImdb*
10 | 311.csv
11 | word2vec-nlp-tutorial*
12 | *.pyc
13 | BregmanToolkit
14 | Document Classifier.ipynb
15 | TalkingMachinesPodcast.wav
16 | audio_dataset
17 | doc-sensitivity*
18 | *.zip
19 | *.pdf
20 | *.tar.gz
21 | *.tar
22 | User Identification From Walking Activity
23 | mobypos.txt
24 | cifar-10*
25 | cifar10-model
26 | vgg_face_dataset
27 | vgg_face_full.csv
28 | logs
29 | aug*.npy
30 | vgg-face
31 | vgg_face_full_urls.csv
32 | vgg-face-1000epochs*
33 | *.h5
34 | *.png
35 | ch10-model*
36 | international-airline-passengers.csv
37 | coronawhy
38 | en-doc-sensitivity-logreg*
39 | vgg-models
40 | *clean_sensitivity*
41 | foobar
42 | COVID-19
43 | MNIST_data
44 | log.csv
45 | *.mat
46 | cuda*.sh
47 | LibriSpeech*
48 | RNN-Tutorial
49 | models/seq2seq*
50 | models/*.ckpt*
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:1.15.2-gpu-py3-jupyter
2 |
3 | ENV CONTAINER_USER_ID="mltf2" \
4 | CONTAINER_GROUP_ID="mltf2"
5 |
6 | WORKDIR /usr/src/mltf2
7 | COPY download-libs.sh .
8 | COPY download-data.sh .
9 |
10 | RUN apt-get update\
11 | && apt-get full-upgrade -y\
12 | && apt-get autoremove -y\
13 | && apt-get install --no-install-recommends -y\
14 | cmake\
15 | gcc\
16 | g++\
17 | mpi-default-bin\
18 | pkg-config\
19 | libpng-dev\
20 | libfreetype6-dev\
21 | libsndfile1-dev\
22 | libsm6\
23 | curl\
24 | zlib1g-dev\
25 | zlib1g\
26 | libssl-dev\
27 | libffi-dev\
28 | zip\
29 | unzip\
30 | openjdk-11-jdk-headless\
31 | lbzip2\
32 | ffmpeg\
33 | && apt-get clean all\
34 | && useradd -U -d /home/mltf2 -s /bin/sh ${CONTAINER_USER_ID}\
35 | && mkdir /home/mltf2\
36 | && chown -R mltf2:mltf2 /home/mltf2\
37 | && mkdir -p /usr/src/mltf2/\
38 | && cd /usr/src/mltf2/\
39 | && mkdir -p data/cache data/logs models libs\
40 | && ./download-libs.sh\
41 | && cd /usr/src/mltf2\
42 | && ./download-data.sh
43 |
44 | # Python 3 dependencies
45 | COPY requirements.txt /tmp
46 | # Python 2 dependencies
47 | COPY requirements-py2.txt /tmp
48 |
49 | # Install deps and bulid custom Python2 for Bregman Toolkit and VGG16.py
50 | RUN pip install -r /tmp/requirements.txt\
51 | && mkdir -p /usr/src/python27\
52 | && cd /usr/src/python27\
53 | && curl -O https://www.python.org/ftp/python/2.7.18/Python-2.7.18.tar.xz\
54 | && tar xvf Python-2.7.18.tar.xz\
55 | && cd /usr/src/python27/Python-2.7.18\
56 | && sh ./configure --enable-shared --prefix=/usr/install/python27 --enable-unicode=ucs4\
57 | && make -j4 && make install\
58 | && cd .. && rm -rf Python-2.7.18.tar.xz Python-2.7.18\
59 | && echo "/usr/install/python27/lib/" >> /etc/ld.so.conf\
60 | && ldconfig\
61 | && cd /usr/install/python27/bin\
62 | && curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\
63 | && /usr/install/python27/bin/python2.7 get-pip.py\
64 | && /usr/install/python27/bin/pip2.7 install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.14.0-cp27-none-linux_x86_64.whl\
65 | && /usr/install/python27/bin/pip2.7 install -r /tmp/requirements-py2.txt\
66 | && /usr/install/python27/bin/python2.7 -m pip install ipykernel\
67 | && cd /usr/src/mltf2/libs/BregmanToolkit\
68 | && /usr/install/python27/bin/python setup.py install\
69 | && rm -rf /root/.cache
70 |
71 |
72 | WORKDIR /usr/src/mltf2
73 |
74 | COPY ch02 /usr/src/mltf2/ch02
75 | COPY ch03 /usr/src/mltf2/ch03
76 | COPY ch04 /usr/src/mltf2/ch04
77 | COPY ch05 /usr/src/mltf2/ch05
78 | COPY ch06 /usr/src/mltf2/ch06
79 | COPY ch07 /usr/src/mltf2/ch07
80 | COPY ch08 /usr/src/mltf2/ch08
81 | COPY ch09 /usr/src/mltf2/ch09
82 | COPY ch10 /usr/src/mltf2/ch10
83 | COPY ch11 /usr/src/mltf2/ch11
84 | COPY ch12 /usr/src/mltf2/ch12
85 | COPY ch13 /usr/src/mltf2/ch13
86 | COPY ch14 /usr/src/mltf2/ch14
87 | COPY ch15 /usr/src/mltf2/ch15
88 | COPY ch16 /usr/src/mltf2/ch16
89 | COPY ch17 /usr/src/mltf2/ch17
90 | COPY ch18 /usr/src/mltf2/ch18
91 | COPY ch19 /usr/src/mltf2/ch19
92 |
93 | COPY figs /usr/src/mltf2/figs
94 |
95 | RUN chown -R mltf2:mltf2 ch* models data/cache data/logs
96 | COPY mltf-entrypoint.sh .
97 | USER mltf2
98 |
99 | ENTRYPOINT ["bash", "/usr/src/mltf2/mltf-entrypoint.sh"]
100 |
--------------------------------------------------------------------------------
/Dockerfile-TFv2:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:2.3.0-gpu-jupyter
2 |
3 | ENV CONTAINER_USER_ID="mltf2" \
4 | CONTAINER_GROUP_ID="mltf2"
5 |
6 | WORKDIR /usr/src/mltf2
7 | COPY download-libs.sh .
8 | COPY download-data.sh .
9 |
10 | RUN apt-get update\
11 | && apt-get full-upgrade -y\
12 | && apt-get autoremove -y\
13 | && apt-get install --no-install-recommends -y\
14 | cmake\
15 | gcc\
16 | g++\
17 | mpi-default-bin\
18 | pkg-config\
19 | libpng-dev\
20 | libfreetype6-dev\
21 | libsndfile1-dev\
22 | libsm6\
23 | curl\
24 | zlib1g-dev\
25 | zlib1g\
26 | libssl-dev\
27 | libffi-dev\
28 | zip\
29 | unzip\
30 | openjdk-11-jdk-headless\
31 | lbzip2\
32 | ffmpeg\
33 | && apt-get clean all\
34 | && useradd -U -d /home/mltf2 -s /bin/sh ${CONTAINER_USER_ID}\
35 | && mkdir /home/mltf2\
36 | && chown -R mltf2:mltf2 /home/mltf2\
37 | && mkdir -p /usr/src/mltf2/\
38 | && cd /usr/src/mltf2/\
39 | && mkdir -p data/cache data/logs models libs\
40 | && ./download-libs.sh\
41 | && cd /usr/src/mltf2\
42 | && ./download-data.sh
43 |
44 | COPY requirements-tf2.txt /tmp/requirements.txt
45 | RUN pip install -U pip && pip install -r /tmp/requirements.txt && rm -rf /root/.cache
46 |
47 | WORKDIR /usr/src/mltf2
48 |
49 | COPY ./TFv2/ch02 /usr/src/mltf2/ch02
50 | COPY ./TFv2/ch03 /usr/src/mltf2/ch03
51 | COPY ./TFv2/ch04 /usr/src/mltf2/ch04
52 | COPY ./TFv2/ch05 /usr/src/mltf2/ch05
53 | COPY ./TFv2/ch06 /usr/src/mltf2/ch06
54 | COPY ./TFv2/ch07 /usr/src/mltf2/ch07
55 | COPY ./TFv2/ch08 /usr/src/mltf2/ch08
56 | COPY ./TFv2/ch09 /usr/src/mltf2/ch09
57 | COPY ./TFv2/ch10 /usr/src/mltf2/ch10
58 | COPY ./TFv2/ch11 /usr/src/mltf2/ch11
59 | COPY ./TFv2/ch12 /usr/src/mltf2/ch12
60 | COPY ./TFv2/ch13 /usr/src/mltf2/ch13
61 | COPY ./TFv2/ch14 /usr/src/mltf2/ch14
62 | COPY ./TFv2/ch15 /usr/src/mltf2/ch15
63 | COPY ./TFv2/ch16 /usr/src/mltf2/ch16
64 | COPY ./TFv2/ch17 /usr/src/mltf2/ch17
65 | COPY ./TFv2/ch18 /usr/src/mltf2/ch18
66 | COPY ./TFv2/ch19 /usr/src/mltf2/ch19
67 |
68 | COPY figs /usr/src/mltf2/figs
69 |
70 | RUN chown -R mltf2:mltf2 ch* models data/cache data/logs libs
71 | COPY mltf-entrypoint.sh .
72 | USER mltf2
73 |
74 | ENTRYPOINT ["bash", "/usr/src/mltf2/mltf-entrypoint.sh"]
75 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Machine Learning with TensorFlow, 2nd Edition
2 | =============================================
3 |
4 |
5 | This is the code repository for the 2nd edition of [Manning Publications'](http://manning.com/)
6 | [Machine Learning with TensorFlow](https://www.manning.com/books/machine-learning-with-tensorflow-second-edition?a_aid=5700fc87&a_bid=1e05f0bb)
7 | written by [Chris Mattmann](http://twitter.com/chrismattmann/).
8 |
9 | The code in this repository is mostly [Jupyter Notebooks](http://jupyter.org/) that correspond
10 | to the numbered listings in each chapter of the book. The code has been tested with
11 | [TensorFlow 1.15.2](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf) but there
12 | is a complete porting of the code in the book to [TensorFlow 2.x](https://github.com/chrismattmann/MLwithTensorFlow2ed/tree/master/TFv2).
13 |
14 | We welcome contributions to the TF2 port and to all of the notebooks in TF1.15.x too!
15 |
16 | ## Quick Start
17 |
18 | The repository contains two fully functional [Docker](https://hub.docker.com/r/chrismattmann/mltf2/tags?page=1&ordering=last_updated) images. The first `latest` runs with TF1.15.x and tracks with the book examples. You can get going by simply running from a command prompt:
19 |
20 | ```
21 | $ docker pull chrismattmann/mltf2:latest
22 | $ ./run_environment.sh
23 | ```
24 |
25 | This will pull the TF1.15.x image and start Jupyter running on localhost. Watch for the startup
26 | message, and then click through (including the token needed past the `?` in the URL) to start
27 | your Jupyter session.
28 |
29 | To run the TF2.x version of the code and notebooks you can similarly run the `tf2` tag:
30 |
31 | ```
32 | $ docker pull chrismattmann/mltf2:tf2
33 | $ ./run_TFv2_environment.sh
34 | ```
35 |
36 | Follow the URL from the startup message.
37 |
38 | Enjoy!
39 |
40 | ## Pre-requisites
41 |
42 | Though the book has [TensorFlow](http://twitter.com/tensorflow) in the name, the book is also
43 | just as much about generalized machine learning and its theory, and the suite of frameworks
44 | that also come in handy when dealing with machine learning. The requirements for running the
45 | notebooks are below. You should PIP install them using your favorite Python. The examples from
46 | the book have been shown to work in Python 2.7, and Python 3.7. I didn't have time to test all
47 | of them but we are happy to receive PRs for things we've missed.
48 |
49 | Additionally the [Docker](Dockerfile) has been tested, and on the latest Docker for Mac it adds
50 | about 1.5% overhead on CPU mode and is totally usable and a one-shot easy installer for all of
51 | the dependencies. Browse the file to see what you'll need to install and how to run the
52 | code locally if desired.
53 |
54 | * TensorFlow
55 | * Jupyter
56 | * Pandas - for data frames and easy tabular data manipulation
57 | * NumPy, SciPy
58 | * Matplotlib
59 | * [NLTK](http://nltk.org/) - for anything text or NLP (such as Sentiment Analysis from Chapter 6)
60 | * TQDM - for progress bars
61 | * SKLearn - for various helper functions
62 | * [Bregman Toolkit](https://github.com/bregmanstudio/BregmanToolkit/) (for audio examples in Chapter 7)
63 | * [Tika](http://github.com/chrismattmann/tika-python)
64 | * [Ystockquote](https://github.com/cgoldberg/ystockquote)
65 | * Requests
66 | * [OpenCV](http://opencv.org/)
67 | * [Horovod](https://github.com/horovod/horovod) - use 0.18.2 (or 0.18.1) for use with the Maverick2 VGG Face model.
68 | * [VGG16](https://www.cs.toronto.edu/~frossard/post/vgg16/) - grab `vgg16.py` and `vgg16_weights.npz`, `imagenet_classes.py` and `laska.png` - only works with Python2.7, place
69 | in the `lib` directory.
70 | * PyDub - for Chapter 17 in the LSTM chapter.
71 | * [Basic Units](https://raw.githubusercontent.com/matplotlib/matplotlib/master/examples/units/basic_units.py) - for use in Chapter 17. Place in `libs/basic_units/` folder.
72 | * [RNN-Tutorial](https://github.com/mrubash1/RNN-Tutorial/) - used in Chapter 17 to help implement the deep speech model and train it.
73 |
74 | ## Data Requirements
75 |
76 | You will generate lots of data when running the notebooks in particular building models. But to train and
77 | build those models you will also need data. I have created an easy [DropBox](http://dropbox.com/) folder
78 | for you to pull input data for use in training models from the book. Access the DropBox folder
79 | [here](https://www.dropbox.com/sh/abjqqcwuzx2mttd/AADIM01H44Y-tdAHXUHt5ZWFa?dl=0).
80 |
81 | Note that the Docker build described below automatically pulls down all the data for you and incorporates it
82 | into the Docker environment so that you don't have to download a thing.
83 |
84 | The pointers below let you know what data you need for what chapters, and where to put it. Unless otherwise
85 | specified, the data should be placed into the `data` folder. Note that as you are running the notebooks
86 | the notebooks will generate TF models and write them and `checkpoint` files to the `models/` folder.
87 |
88 | ## Data Input requirements
89 |
90 | ### Chapter 4
91 | - `data/311.csv`
92 |
93 | ### Chapter 6
94 | - `data/word2vec-nlp-tutorial/labeledTrainData.tsv`
95 | - `data/word2vec-nlp-tutorial/testData.tsv`
96 | - `data/aclImdb/test/neg/`
97 | - `data/aclImdb/test/pos/`
98 |
99 | ### Chapter 7
100 | - `data/audio_dataset/`
101 | - `data/TalkingMachinesPodcast.wav`
102 |
103 | ### Chapter 8
104 | - `data/User Identification From Walking Activity/`
105 |
106 | ### Chapter 10
107 | - `data/mobypos.txt`
108 |
109 | ### Chapter 12
110 | - `data/cifar-10-batches-py`
111 | - `data/MNIST_data/` (if you try the MNIST extra example)
112 |
113 | ### Chapter 14
114 | - `data/cifar-10-batches-py`
115 |
116 | ### Chapter 15
117 | - `data/cifar-10-batches-py`
118 | - `data/vgg_face_dataset` - The VGG face metadata including Celeb Names
119 | - `data/vgg-face` - The actual VGG face data
120 | - `data/vgg_face_full_urls.csv` - Metadata information about VGG Face URLs
121 | - `data/vgg_face_full.csv` - Metadata information about all VGG Face data
122 | - `data/vgg-models/checkpoints-1e3x4-2e4-09202019` - To run the VGG Face Estimator additional example
123 | - `models/vgg_face_weights.h5` - To run the VGG Face verification additional example
124 |
125 | ### Chapter 16
126 | - `data/international-airline-passengers.csv`
127 |
128 | ### Chapter 17
129 | - `data/LibriSpeech`
130 | - `libs/basic_units/`
131 | - `libs/RNN-Tutorial/`
132 |
133 | ### Chapter 18
134 | - `data/seq2seq`
135 |
136 | ### Chapter 19
137 | - `libs/vgg16/laska.png`
138 | - `data/cloth_folding_rgb_vids`
139 |
140 | ## Setting up the environment (Tested on Mac and Linux)
141 |
142 | ### Using Docker
143 |
144 | #### Building the image
145 |
146 | ```shell
147 | # Only builds a Docker compatible with GPU and CPU.
148 | ./build_environment.sh #TensorFlow1
149 | ./build_TFv2_environment.sh #TensorFlow2
150 | ```
151 |
152 | #### Running the notebook from docker
153 |
154 | ```shell
155 | # Runs in GPU and CPU mode and will look for NVIDIA drivers first and fall back to reg CPU.
156 | ./run_environment.sh #TensorFlow1
157 | ./run_TFv2_environment.sh # TensorFlow2
158 | ```
159 |
160 | #### Using a GPU
161 |
162 | You need to install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) to use your
163 | GPU in docker. Follow these instructions (also on the linked page)
164 |
165 | ```shell
166 | # Add the package repositories
167 | distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
168 | curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
169 | curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
170 |
171 | sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
172 | sudo systemctl restart docker
173 | ```
174 |
175 | ### Using your local python
176 |
177 | #### Building the environment
178 |
179 | If you want to build with your existing Python that's fine
180 | you will need a Python2.7 for some of the chapters noted
181 | above (like chapter7 which uses `BregmanToolkit`), and
182 | python 3.7 for everything else. The requirements.txt file
183 | is different for each, so watch which one to pip install
184 | below.
185 |
186 | ```shell
187 | #Python3.7 - GPU and CPU
188 | $ pip3.7 install -r requirements.txt
189 |
190 | #Python3.7 - TensorFlow2 GPU and CPU
191 | $ pip3.7 install -r requirements-tf2.txt
192 |
193 | #Python2.7 - CPU
194 | $ pip2.7 install -r requirements-py2.txt
195 |
196 | #Python2.7 - GPU
197 | $ pip2.7 install -r requirements-gpu-py2.txt
198 | ```
199 |
200 | #### Running the notebook from your local environment
201 |
202 | ```shell
203 | $ jupyter notebook
204 | ```
205 |
206 | Questions, comments?
207 | ===================
208 | Send them to [Chris A. Mattmann](mailto:chris.mattmann@gmail.com).
209 | Also please consider heading over to the [livebook forum](https://livebook.manning.com/#!/book/machine-learning-with-tensorflow-second-edition/discussion) where you can discuss the book with other readers and the author too.
210 |
211 | Contributors
212 | ============
213 | * Chris A. Mattmann
214 | * Rob Royce (`tensorflow2` branch)
215 | * [Philip Southam](https://github.com/philipsoutham) (Dockerfile build in `docker` branch)
216 |
217 | License
218 | =======
219 | [Apache License, version 2](http://www.apache.org/licenses/LICENSE-2.0)
220 |
--------------------------------------------------------------------------------
/TFv2/Device Stats.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Device Stats\n",
8 | "---\n",
9 | "\n",
10 | "The following is for informational purposes only."
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 1,
16 | "metadata": {},
17 | "outputs": [
18 | {
19 | "name": "stdout",
20 | "output_type": "stream",
21 | "text": [
22 | "[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]\n",
23 | "/device:GPU:0\n",
24 | "True\n",
25 | "2.3.0\n"
26 | ]
27 | }
28 | ],
29 | "source": [
30 | "import tensorflow as tf\n",
31 | "print(tf.config.list_physical_devices('GPU'))\n",
32 | "print(tf.test.gpu_device_name())\n",
33 | "print(tf.test.is_built_with_cuda())\n",
34 | "print(tf.__version__)"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 | "# Hardware\n",
42 | "---\n",
43 | "\n",
44 | "The machine used to build and run the listings within this repository used the following (primary) hardware components:\n",
45 | "\n",
46 | "**GPU:**\n",
47 | "[Nvidia RTX 2070 Super](https://www.nvidia.com/en-us/geforce/graphics-cards/rtx-2070-super/)\n",
48 | "\n",
49 | "**CPU:**\n",
50 | "[Intel Core i5-9600K @ 3.70GHz](https://ark.intel.com/content/www/us/en/ark/products/134896/intel-core-i5-9600k-processor-9m-cache-up-to-4-60-ghz.html)\n",
51 | "\n",
52 | "**Memory:**\n",
53 | "[Corsair 16GB (2x 8GB) DDR4 PC4-28800 (3600MHz)](https://www.corsair.com/ww/en/Categories/Products/Memory/Vengeance-PRO-RGB-Black/p/CMW16GX4M2C3600C18#)"
54 | ]
55 | },
56 | {
57 | "cell_type": "raw",
58 | "metadata": {},
59 | "source": [
60 | "+-----------------------------------------------------------------------------+\n",
61 | "| NVIDIA-SMI 440.100 Driver Version: 440.100 CUDA Version: 10.2 |\n",
62 | "|-------------------------------+----------------------+----------------------+\n",
63 | "| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
64 | "| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
65 | "|===============================+======================+======================|\n",
66 | "| 0 GeForce RTX 207... Off | 00000000:01:00.0 Off | N/A |\n",
67 | "| 40% 39C P8 13W / 215W | 258MiB / 7959MiB | 0% Default |\n",
68 | "+-------------------------------+----------------------+----------------------+\n",
69 | "\n",
70 | "+-----------------------------------------------------------------------------+\n",
71 | "| Processes: GPU Memory |\n",
72 | "| GPU PID Type Process name Usage |\n",
73 | "|=============================================================================|\n",
74 | "| 0 4068 C /home/rob/.pyenv/versions/3.8.4/bin/python 247MiB |\n",
75 | "+-----------------------------------------------------------------------------+"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {},
82 | "outputs": [],
83 | "source": []
84 | }
85 | ],
86 | "metadata": {
87 | "kernelspec": {
88 | "display_name": "Python 3",
89 | "language": "python",
90 | "name": "python3"
91 | },
92 | "language_info": {
93 | "codemirror_mode": {
94 | "name": "ipython",
95 | "version": 3
96 | },
97 | "file_extension": ".py",
98 | "mimetype": "text/x-python",
99 | "name": "python",
100 | "nbconvert_exporter": "python",
101 | "pygments_lexer": "ipython3",
102 | "version": "3.8.4"
103 | }
104 | },
105 | "nbformat": 4,
106 | "nbformat_minor": 4
107 | }
108 |
--------------------------------------------------------------------------------
/TFv2/README.md:
--------------------------------------------------------------------------------
1 | # MLwithTensorFlow2-2ed
2 | ---
3 |
4 | Exercises and listings for ["Machine Learning with TensorFlow 2nd Edition"](https://www.manning.com/books/machine-learning-with-tensorflow-second-edition) using TensorFlow v2. The original listings, which use TensorFlow v1 are available on [Chris Mattmann's GitHub page](https://github.com/chrismattmann/MLwithTensorFlow2ed)
5 |
6 | ## Methodology
7 | ---
8 |
9 | Tensorflow v2 has introduced a variety of breaking changes. Some of these changes affect workflow, while others require adopting entirely new paradigms. [Eager Execution](https://www.tensorflow.org/guide/eager), for example, requires a change from Declarative to Imperative Programming. We no longer use `Placeholder`s, and we rely on different libraries to accomplish tasks that have been deprecated in v2. The examples, exercises, and listings in the text and on Chris' GitHub page will be translated from TensorFlow to TensorFlow2 using the following methodologies:
10 |
11 | - We use the official [TF v1 to TF v2 migration guide](https://www.tensorflow.org/guide/migrate) wherever possible.
12 | - When the migration guide does not suffice, we attempt to _replicate results_ attained in the text and in Chris' github repository.
13 |
14 |
15 |
16 | For anyone interested in how a more elaborate project in Tensorflow would be migrated from v1 to v2, we encourage you to check out the migration guide linked above, and also see whether the official [upgrade script](https://www.tensorflow.org/guide/upgrade) would work for your situation. Note that we are not attempting to use the upgrade script in this repository for two reasons:
17 |
18 | - 1 ["The conversion script automates as much as possible, but there are still syntactical and stylistic changes that cannot be performed by the script."](https://www.tensorflow.org/guide/upgrade)
19 | - 2 There is value for the author (of this repository) in fully examining the changes from TF v1 to TF v2 (i.e., this is a learning experience).
20 |
21 | ## Contributions
22 | ---
23 |
24 | Contributions are more than welcome. This repository and the contents therein are in the public domain, subject to the terms and conditions (if any) laid out by Manning Publications (distributor), Chris Mattmann (author), or any other binding agreements between the user of this repository and the proprietor of the source material.
25 |
26 | ## Disclaimer
27 | ---
28 |
29 | The users of this repository shall have no expectation in terms of correctness or thoroughness. The author(s) have attempted to correctly translate the original source material, but there are many reasons why the v2 source might be wildly different from that of the v1 source. If there are issues with the code and/or documentation, please submit a pull request and/or contact the owner of this repository.
30 |
31 | ## Asking for help
32 | ---
33 |
34 | We will make reasonable attempts to address issues in the code, but please be aware that the viability of a solution will be judged by the contents of its output. The Jupyter Notebooks herein capture the output of the execution and persist that output when pushed to Github. We _do not_ plan to update this repository as new Tensorflow versions are released. This repository is meant to satisfy the demand for v2 translations and not to be an up-to-date implementation.
35 |
--------------------------------------------------------------------------------
/TFv2/ch02/Exercise 2.01.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "How would you initialize a 500x500 tensor with all elements equaling 0.5?"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": 2,
22 | "metadata": {},
23 | "outputs": [
24 | {
25 | "data": {
26 | "text/plain": [
27 | ""
35 | ]
36 | },
37 | "execution_count": 2,
38 | "metadata": {},
39 | "output_type": "execute_result"
40 | }
41 | ],
42 | "source": [
43 | "tf.ones([500,500]) * 0.5"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": null,
49 | "metadata": {},
50 | "outputs": [],
51 | "source": []
52 | }
53 | ],
54 | "metadata": {
55 | "kernelspec": {
56 | "display_name": "Python 3",
57 | "language": "python",
58 | "name": "python3"
59 | },
60 | "language_info": {
61 | "codemirror_mode": {
62 | "name": "ipython",
63 | "version": 3
64 | },
65 | "file_extension": ".py",
66 | "mimetype": "text/x-python",
67 | "name": "python",
68 | "nbconvert_exporter": "python",
69 | "pygments_lexer": "ipython3",
70 | "version": "3.8.3"
71 | }
72 | },
73 | "nbformat": 4,
74 | "nbformat_minor": 4
75 | }
76 |
--------------------------------------------------------------------------------
/TFv2/ch02/Exercise 2.02.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Exercise 2.2\n",
8 | "Use the TensorFlow operators you've learned so far to produce the Gaussian distribution.\n",
9 | "\n",
10 | "_Note_: The below snippets prioritize readability."
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 1,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "import tensorflow as tf\n",
20 | "from math import pi"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 2,
26 | "metadata": {},
27 | "outputs": [],
28 | "source": [
29 | "def guassian_pdf(x, mu, sigma):\n",
30 | " coefficient = 1/(sigma * tf.sqrt(2*pi))\n",
31 | " exparg = -(1/2) * tf.pow(((x-mu)/sigma), 2)\n",
32 | " exp = tf.exp(exparg)\n",
33 | " return coefficient * exp"
34 | ]
35 | },
36 | {
37 | "cell_type": "code",
38 | "execution_count": 3,
39 | "metadata": {},
40 | "outputs": [
41 | {
42 | "name": "stdout",
43 | "output_type": "stream",
44 | "text": [
45 | "tf.Tensor(0.24197073, shape=(), dtype=float32)\n"
46 | ]
47 | }
48 | ],
49 | "source": [
50 | "print(guassian_pdf(2, 1, 1))"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {},
57 | "outputs": [],
58 | "source": []
59 | }
60 | ],
61 | "metadata": {
62 | "kernelspec": {
63 | "display_name": "Python 3",
64 | "language": "python",
65 | "name": "python3"
66 | },
67 | "language_info": {
68 | "codemirror_mode": {
69 | "name": "ipython",
70 | "version": 3
71 | },
72 | "file_extension": ".py",
73 | "mimetype": "text/x-python",
74 | "name": "python",
75 | "nbconvert_exporter": "python",
76 | "pygments_lexer": "ipython3",
77 | "version": "3.8.3"
78 | }
79 | },
80 | "nbformat": 4,
81 | "nbformat_minor": 4
82 | }
83 |
--------------------------------------------------------------------------------
/TFv2/ch02/Exercise 2.03.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "# TensorFlow v2\n",
17 | "---"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 2,
23 | "metadata": {},
24 | "outputs": [
25 | {
26 | "name": "stdout",
27 | "output_type": "stream",
28 | "text": [
29 | "[[-1. -2.]]\n"
30 | ]
31 | }
32 | ],
33 | "source": [
34 | "matrix = tf.constant([[1., 2.]])\n",
35 | "negMatrix = tf.negative(matrix)\n",
36 | "print(negMatrix.numpy())"
37 | ]
38 | },
39 | {
40 | "cell_type": "markdown",
41 | "metadata": {},
42 | "source": [
43 | "# TensorFlow v1 Compatibility\n",
44 | "---"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 7,
50 | "metadata": {},
51 | "outputs": [
52 | {
53 | "name": "stdout",
54 | "output_type": "stream",
55 | "text": [
56 | "[[-1. -2.]]\n"
57 | ]
58 | }
59 | ],
60 | "source": [
61 | "tf.compat.v1.disable_eager_execution()\n",
62 | "sess = tf.compat.v1.InteractiveSession()\n",
63 | "matrix = tf.constant([[1., 2.]])\n",
64 | "negMatrix = tf.compat.v1.negative(matrix)\n",
65 | "result = negMatrix.eval()\n",
66 | "sess.close()\n",
67 | "print(result)"
68 | ]
69 | }
70 | ],
71 | "metadata": {
72 | "kernelspec": {
73 | "display_name": "Python 3",
74 | "language": "python",
75 | "name": "python3"
76 | },
77 | "language_info": {
78 | "codemirror_mode": {
79 | "name": "ipython",
80 | "version": 3
81 | },
82 | "file_extension": ".py",
83 | "mimetype": "text/x-python",
84 | "name": "python",
85 | "nbconvert_exporter": "python",
86 | "pygments_lexer": "ipython3",
87 | "version": "3.8.3"
88 | }
89 | },
90 | "nbformat": 4,
91 | "nbformat_minor": 4
92 | }
93 |
--------------------------------------------------------------------------------
/TFv2/ch02/Listing 2.03.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf\n",
10 | "import numpy as np"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [
18 | {
19 | "name": "stdout",
20 | "output_type": "stream",
21 | "text": [
22 | "<class 'list'>\n",
23 | "<class 'numpy.ndarray'>\n",
24 | "<class 'tensorflow.python.framework.ops.EagerTensor'>\n"
25 | ]
26 | }
27 | ],
28 | "source": [
29 | "m1 = [[1.0, 2.0], [3.0, 4.0]]\n",
30 | "m2 = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n",
31 | "m3 = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n",
32 | "\n",
33 | "print(type(m1))\n",
34 | "print(type(m2))\n",
35 | "print(type(m3))"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 3,
41 | "metadata": {},
42 | "outputs": [
43 | {
44 | "name": "stdout",
45 | "output_type": "stream",
46 | "text": [
47 | "<class 'tensorflow.python.framework.ops.EagerTensor'>\n",
48 | "<class 'tensorflow.python.framework.ops.EagerTensor'>\n",
49 | "<class 'tensorflow.python.framework.ops.EagerTensor'>\n"
50 | ]
51 | }
52 | ],
53 | "source": [
54 | "t1 = tf.convert_to_tensor(m1, dtype=tf.float32)\n",
55 | "t2 = tf.convert_to_tensor(m2, dtype=tf.float32)\n",
56 | "t3 = tf.convert_to_tensor(m3, dtype=tf.float32)\n",
57 | "\n",
58 | "print(type(t1))\n",
59 | "print(type(t2))\n",
60 | "print(type(t3))"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": []
69 | }
70 | ],
71 | "metadata": {
72 | "kernelspec": {
73 | "display_name": "Python 3",
74 | "language": "python",
75 | "name": "python3"
76 | },
77 | "language_info": {
78 | "codemirror_mode": {
79 | "name": "ipython",
80 | "version": 3
81 | },
82 | "file_extension": ".py",
83 | "mimetype": "text/x-python",
84 | "name": "python",
85 | "nbconvert_exporter": "python",
86 | "pygments_lexer": "ipython3",
87 | "version": "3.8.3"
88 | }
89 | },
90 | "nbformat": 4,
91 | "nbformat_minor": 4
92 | }
93 |
--------------------------------------------------------------------------------
/TFv2/ch02/Listing 2.04.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [
17 | {
18 | "name": "stdout",
19 | "output_type": "stream",
20 | "text": [
21 | "tf.Tensor([[1. 2.]], shape=(1, 2), dtype=float32)\n",
22 | "tf.Tensor(\n",
23 | "[[1]\n",
24 | " [2]], shape=(2, 1), dtype=int32)\n",
25 | "tf.Tensor(\n",
26 | "[[[ 1 2]\n",
27 | " [ 3 4]\n",
28 | " [ 5 6]]\n",
29 | "\n",
30 | " [[ 7 8]\n",
31 | " [ 9 10]\n",
32 | " [11 12]]], shape=(2, 3, 2), dtype=int32)\n"
33 | ]
34 | }
35 | ],
36 | "source": [
37 | "m1 = tf.constant([[1., 2.]])\n",
38 | "m2 = tf.constant([[1], [2]])\n",
39 | "m3 = tf.constant([[[1,2], [3,4], [5,6]],[[7,8], [9,10], [11,12]]])\n",
40 | "print(m1)\n",
41 | "print(m2)\n",
42 | "print(m3)"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": null,
48 | "metadata": {},
49 | "outputs": [],
50 | "source": []
51 | }
52 | ],
53 | "metadata": {
54 | "kernelspec": {
55 | "display_name": "Python 3",
56 | "language": "python",
57 | "name": "python3"
58 | },
59 | "language_info": {
60 | "codemirror_mode": {
61 | "name": "ipython",
62 | "version": 3
63 | },
64 | "file_extension": ".py",
65 | "mimetype": "text/x-python",
66 | "name": "python",
67 | "nbconvert_exporter": "python",
68 | "pygments_lexer": "ipython3",
69 | "version": "3.8.3"
70 | }
71 | },
72 | "nbformat": 4,
73 | "nbformat_minor": 4
74 | }
75 |
--------------------------------------------------------------------------------
/TFv2/ch02/Listing 2.05.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [
17 | {
18 | "name": "stdout",
19 | "output_type": "stream",
20 | "text": [
21 | "tf.Tensor([[-1 -2]], shape=(1, 2), dtype=int32)\n"
22 | ]
23 | }
24 | ],
25 | "source": [
26 | "x = tf.constant([[1,2]])\n",
27 | "negMatrix = tf.negative(x)\n",
28 | "print(negMatrix)"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": null,
34 | "metadata": {},
35 | "outputs": [],
36 | "source": []
37 | }
38 | ],
39 | "metadata": {
40 | "kernelspec": {
41 | "display_name": "Python 3",
42 | "language": "python",
43 | "name": "python3"
44 | },
45 | "language_info": {
46 | "codemirror_mode": {
47 | "name": "ipython",
48 | "version": 3
49 | },
50 | "file_extension": ".py",
51 | "mimetype": "text/x-python",
52 | "name": "python",
53 | "nbconvert_exporter": "python",
54 | "pygments_lexer": "ipython3",
55 | "version": "3.8.3"
56 | }
57 | },
58 | "nbformat": 4,
59 | "nbformat_minor": 4
60 | }
61 |
--------------------------------------------------------------------------------
/TFv2/ch02/Listing 2.06.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# What's new?\n",
8 | "\n",
9 | "With the advent of [eager execution](https://www.tensorflow.org/guide/effective_tf2#eager_execution), TensorFlow 2 does away with explicit calls to Session objects. \n",
10 | "As a matter of fact, TF2 prefers [functions, not sessions](https://www.tensorflow.org/guide/effective_tf2#functions_not_sessions).\n",
11 | "Find more information on the official [Effective TensorFlow 2](https://www.tensorflow.org/guide/effective_tf2) page. \n",
12 | "Here is a brief synopsis of the change from TF1 $\rightarrow$ TF2\n",
13 | "\n",
14 | "**TensorFlow 1.X**\n",
15 | "\n",
16 | "`outputs = session.run(f(placeholder), feed_dict={placeholder: input})`\n",
17 | "\n",
18 | "\n",
19 | "**TensorFlow 2.0**\n",
20 | "\n",
21 | "`outputs = f(input)`\n",
22 | "\n",
23 | "---"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": 3,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "import tensorflow as tf"
33 | ]
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "metadata": {},
38 | "source": [
39 | "---\n",
40 | "# TensorFlow v2"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": 17,
46 | "metadata": {},
47 | "outputs": [
48 | {
49 | "name": "stdout",
50 | "output_type": "stream",
51 | "text": [
52 | "[[-1. -2.]]\n"
53 | ]
54 | }
55 | ],
56 | "source": [
57 | "x = tf.constant([[1., 2.]])\n",
58 | "negMatrix = tf.negative(x)\n",
59 | "print(negMatrix.numpy())"
60 | ]
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "metadata": {},
65 | "source": [
66 | "---\n",
67 | "## TensorFlow v1 Compatibility\n",
68 | "\n",
69 | "You can still use `Session` functionality, but you must do so using the `tf.compat` module:"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": 18,
75 | "metadata": {},
76 | "outputs": [
77 | {
78 | "name": "stdout",
79 | "output_type": "stream",
80 | "text": [
81 | "[[-1. -2.]]\n"
82 | ]
83 | }
84 | ],
85 | "source": [
86 | "with tf.compat.v1.Session() as sess:\n",
87 | " x = tf.constant([[1., 2.]])\n",
88 | " negMatrix = tf.negative(x)\n",
89 | " result = sess.run(negMatrix)\n",
90 | "print(result)"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": []
99 | }
100 | ],
101 | "metadata": {
102 | "kernelspec": {
103 | "display_name": "NLP - GPU (Python3)",
104 | "language": "python",
105 | "name": "nlp_gpu"
106 | },
107 | "language_info": {
108 | "codemirror_mode": {
109 | "name": "ipython",
110 | "version": 3
111 | },
112 | "file_extension": ".py",
113 | "mimetype": "text/x-python",
114 | "name": "python",
115 | "nbconvert_exporter": "python",
116 | "pygments_lexer": "ipython3",
117 | "version": "3.6.8"
118 | }
119 | },
120 | "nbformat": 4,
121 | "nbformat_minor": 4
122 | }
123 |
--------------------------------------------------------------------------------
/TFv2/ch02/Listing 2.07.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "---\n",
17 | "# TensorFlow v2"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 2,
23 | "metadata": {},
24 | "outputs": [
25 | {
26 | "name": "stdout",
27 | "output_type": "stream",
28 | "text": [
29 | "[[-1. -2.]]\n"
30 | ]
31 | }
32 | ],
33 | "source": [
34 | "x = tf.constant([[1., 2.]])\n",
35 | "negMatrix = tf.negative(x)\n",
36 | "result = negMatrix.numpy()\n",
37 | "print(result)"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "---\n",
45 | "# TensorFlow v1 Compatibility"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 3,
51 | "metadata": {},
52 | "outputs": [
53 | {
54 | "name": "stdout",
55 | "output_type": "stream",
56 | "text": [
57 | "WARNING:tensorflow:From /home/rob/.local/lib/python3.6/site-packages/tensorflow/python/compat/v2_compat.py:96: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.\n",
58 | "Instructions for updating:\n",
59 | "non-resource variables are not supported in the long term\n"
60 | ]
61 | }
62 | ],
63 | "source": [
64 | "import tensorflow.compat.v1 as tf\n",
65 | "tf.disable_v2_behavior()"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 4,
71 | "metadata": {},
72 | "outputs": [
73 | {
74 | "name": "stdout",
75 | "output_type": "stream",
76 | "text": [
77 | "[[-1. -2.]]\n"
78 | ]
79 | }
80 | ],
81 | "source": [
82 | "sess = tf.InteractiveSession()\n",
83 | "x = tf.constant([[1., 2.]])\n",
84 | "negMatrix = tf.negative(x)\n",
85 | "result = negMatrix.eval()\n",
86 | "print(result)\n",
87 | "sess.close()"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "metadata": {},
93 | "source": [
94 | "---\n",
95 | "## _Warning_\n",
96 | "\n",
97 | "Attempting to combine v1 and v2 features without disabling eager execution can prove fatal:"
98 | ]
99 | },
100 | {
101 | "cell_type": "code",
102 | "execution_count": 3,
103 | "metadata": {},
104 | "outputs": [],
105 | "source": [
106 | "import tensorflow as tf"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 4,
112 | "metadata": {},
113 | "outputs": [
114 | {
115 | "ename": "NotImplementedError",
116 | "evalue": "eval is not supported when eager execution is enabled, is .numpy() what you're looking for?",
117 | "output_type": "error",
118 | "traceback": [
119 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
120 | "\u001b[0;31mNotImplementedError\u001b[0m Traceback (most recent call last)",
121 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconstant\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1.\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m2.\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mnegMatrix\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnegative\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnegMatrix\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0meval\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
122 | "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\u001b[0m in \u001b[0;36meval\u001b[0;34m(self, feed_dict, session)\u001b[0m\n\u001b[1;32m 1146\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0meval\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msession\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1147\u001b[0m raise NotImplementedError(\n\u001b[0;32m-> 1148\u001b[0;31m \u001b[0;34m\"eval is not supported when eager execution is enabled, \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1149\u001b[0m \"is .numpy() what you're looking for?\")\n\u001b[1;32m 1150\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
123 | "\u001b[0;31mNotImplementedError\u001b[0m: eval is not supported when eager execution is enabled, is .numpy() what you're looking for?"
124 | ]
125 | }
126 | ],
127 | "source": [
128 | "sess = tf.compat.v1.InteractiveSession()\n",
129 | "x = tf.constant([[1., 2.]])\n",
130 | "negMatrix = tf.negative(x)\n",
131 | "result = negMatrix.eval()\n",
132 | "print(result)\n",
133 | "sess.close()"
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {},
140 | "outputs": [],
141 | "source": []
142 | }
143 | ],
144 | "metadata": {
145 | "kernelspec": {
146 | "display_name": "Python 3",
147 | "language": "python",
148 | "name": "python3"
149 | },
150 | "language_info": {
151 | "codemirror_mode": {
152 | "name": "ipython",
153 | "version": 3
154 | },
155 | "file_extension": ".py",
156 | "mimetype": "text/x-python",
157 | "name": "python",
158 | "nbconvert_exporter": "python",
159 | "pygments_lexer": "ipython3",
160 | "version": "3.8.3"
161 | }
162 | },
163 | "nbformat": 4,
164 | "nbformat_minor": 4
165 | }
166 |
--------------------------------------------------------------------------------
/TFv2/ch02/Listing 2.08.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "# TensorFlow v2\n",
17 | "---"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 2,
23 | "metadata": {},
24 | "outputs": [
25 | {
26 | "name": "stdout",
27 | "output_type": "stream",
28 | "text": [
29 | "Executing op Neg in device /job:localhost/replica:0/task:0/device:GPU:0\n",
30 | "tf.Tensor([[-1. -2.]], shape=(1, 2), dtype=float32)\n"
31 | ]
32 | }
33 | ],
34 | "source": [
35 | "tf.debugging.set_log_device_placement(True)\n",
36 | "x = tf.constant([[1., 2.]])\n",
37 | "negMatrix = tf.negative(x)\n",
38 | "print(negMatrix)"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "metadata": {},
44 | "source": [
45 | "# TensorFlow v1 Compatibility\n",
46 | "---"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 3,
52 | "metadata": {
53 | "scrolled": true
54 | },
55 | "outputs": [
56 | {
57 | "name": "stdout",
58 | "output_type": "stream",
59 | "text": [
60 | "Device mapping:\n",
61 | "/job:localhost/replica:0/task:0/device:XLA_CPU:0 -> device: XLA_CPU device\n",
62 | "/job:localhost/replica:0/task:0/device:XLA_GPU:0 -> device: XLA_GPU device\n",
63 | "/job:localhost/replica:0/task:0/device:GPU:0 -> device: 0, name: GeForce GTX 1060 6GB, pci bus id: 0000:01:00.0, compute capability: 6.1\n",
64 | "\n",
65 | "Tensor(\"Neg:0\", shape=(1, 2), dtype=float32)\n"
66 | ]
67 | }
68 | ],
69 | "source": [
70 | "with tf.compat.v1.Session() as sess:\n",
71 | " x = tf.constant([[1., 2.]])\n",
72 | " negMatrix = tf.negative(x)\n",
73 | "print(negMatrix)"
74 | ]
75 | }
76 | ],
77 | "metadata": {
78 | "kernelspec": {
79 | "display_name": "Python 3",
80 | "language": "python",
81 | "name": "python3"
82 | },
83 | "language_info": {
84 | "codemirror_mode": {
85 | "name": "ipython",
86 | "version": 3
87 | },
88 | "file_extension": ".py",
89 | "mimetype": "text/x-python",
90 | "name": "python",
91 | "nbconvert_exporter": "python",
92 | "pygments_lexer": "ipython3",
93 | "version": "3.8.3"
94 | }
95 | },
96 | "nbformat": 4,
97 | "nbformat_minor": 4
98 | }
99 |
--------------------------------------------------------------------------------
/TFv2/ch02/Listing 2.09.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "# TensorFlow v2\n",
17 | "---"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 12,
23 | "metadata": {},
24 | "outputs": [
25 | {
26 | "name": "stdout",
27 | "output_type": "stream",
28 | "text": [
29 | "Spike False\n",
30 | "Spike True\n",
31 | "Spike False\n",
32 | "Spike False\n",
33 | "Spike True\n",
34 | "Spike False\n",
35 | "Spike True\n"
36 | ]
37 | }
38 | ],
39 | "source": [
40 | "raw_data = [1., 2., 8., -1., 0., 5.5, 6., 13.]\n",
41 | "spike = tf.Variable(False)\n",
42 | "\n",
43 | "for i in range(1, len(raw_data)):\n",
44 | " if raw_data[i] - raw_data[i-1] > 5:\n",
45 | " updater = spike.assign(True)\n",
46 | " else:\n",
47 | " spike.assign(False)\n",
48 | " print(\"Spike\", spike.numpy())"
49 | ]
50 | },
51 | {
52 | "cell_type": "markdown",
53 | "metadata": {},
54 | "source": [
55 | "# TensorFlow v1 Compatibility\n",
56 | "---"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": 16,
62 | "metadata": {},
63 | "outputs": [
64 | {
65 | "name": "stdout",
66 | "output_type": "stream",
67 | "text": [
68 | "Spike False\n",
69 | "Spike True\n",
70 | "Spike False\n",
71 | "Spike False\n",
72 | "Spike True\n",
73 | "Spike False\n",
74 | "Spike True\n"
75 | ]
76 | }
77 | ],
78 | "source": [
79 | "tf.compat.v1.disable_eager_execution()\n",
80 | "sess = tf.compat.v1.InteractiveSession()\n",
81 | "\n",
82 | "raw_data = [1., 2., 8., -1., 0., 5.5, 6., 13.]\n",
83 | "spike = tf.compat.v1.Variable(False)\n",
84 | "spike.initializer.run()\n",
85 | "\n",
86 | "for i in range(1, len(raw_data)):\n",
87 | " if raw_data[i] - raw_data[i-1] > 5:\n",
88 | " updater = tf.compat.v1.assign(spike, True)\n",
89 | " updater.eval()\n",
90 | " else:\n",
91 | " tf.compat.v1.assign(spike, False).eval()\n",
92 | " print(\"Spike\", spike.eval())\n",
93 | "sess.close()"
94 | ]
95 | }
96 | ],
97 | "metadata": {
98 | "kernelspec": {
99 | "display_name": "Python 3",
100 | "language": "python",
101 | "name": "python3"
102 | },
103 | "language_info": {
104 | "codemirror_mode": {
105 | "name": "ipython",
106 | "version": 3
107 | },
108 | "file_extension": ".py",
109 | "mimetype": "text/x-python",
110 | "name": "python",
111 | "nbconvert_exporter": "python",
112 | "pygments_lexer": "ipython3",
113 | "version": "3.8.3"
114 | }
115 | },
116 | "nbformat": 4,
117 | "nbformat_minor": 4
118 | }
119 |
--------------------------------------------------------------------------------
/TFv2/ch02/Listing 2.10 - 2.11.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "# TensorFlow v2\n",
17 | "---"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 16,
23 | "metadata": {},
24 | "outputs": [
25 | {
26 | "name": "stdout",
27 | "output_type": "stream",
28 | "text": [
29 | "Saved checkpoint at: ./tf_ckpts/ckpt-1\n",
30 | "Saved checkpoint at: ./tf_ckpts/ckpt-2\n",
31 | "Saved checkpoint at: ./tf_ckpts/ckpt-3\n",
32 | "Spikes [False False True False False True False True]\n"
33 | ]
34 | }
35 | ],
36 | "source": [
37 | "raw_data = [1., 2., 8., -1., 0., 5.5, 6., 13.]\n",
38 | "spikes = tf.Variable([False] * len(raw_data), name='spikes')\n",
39 | "\n",
40 | "ckpt = tf.train.Checkpoint(spikes=spikes)\n",
41 | "manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)\n",
42 | "spikes_val = [False for i in range(0, len(raw_data))]\n",
43 | "\n",
44 | "for i in range(1, len(raw_data)):\n",
45 | " if raw_data[i] - raw_data[i-1] > 5:\n",
46 | " spikes_val[i] = True\n",
47 | " updater = spikes.assign(spikes_val)\n",
48 | " save_path = manager.save()\n",
49 | " print(\"Saved checkpoint at: %s\" % save_path)\n",
50 | " \n",
51 | "print(\"Spikes\", spikes.numpy())"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 17,
57 | "metadata": {},
58 | "outputs": [
59 | {
60 | "name": "stdout",
61 | "output_type": "stream",
62 | "text": [
63 | "Restored from ./tf_ckpts/ckpt-3\n",
64 | "Spikes [False False True False False True False True]\n"
65 | ]
66 | }
67 | ],
68 | "source": [
69 | "ckpt.restore(manager.latest_checkpoint)\n",
70 | "if manager.latest_checkpoint:\n",
71 | " print(\"Restored from %s\" % manager.latest_checkpoint)\n",
72 | "\n",
73 | "to_restore = tf.Variable([False]*len(raw_data), name='spikes')\n",
74 | "restored = tf.train.Checkpoint(spikes=to_restore)\n",
75 | "status = restored.restore(tf.train.latest_checkpoint('./tf_ckpts'))\n",
76 | "\n",
77 | "print(\"Spikes\", to_restore.numpy())"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "metadata": {},
83 | "source": [
84 | "# TensorFlow v1 Compatibility\n",
85 | "---\n",
86 | "\n",
87 | "# TODO\n",
88 | "The following code snippet **is non-functional**."
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "tf.compat.v1.disable_eager_execution()\n",
98 | "sess = tf.compat.v1.InteractiveSession()\n",
99 | "\n",
100 | "raw_data = [1., 2., 8., -1., 0., 5.5, 6., 13.]\n",
101 | "\n",
102 | "spikes = tf.compat.v1.Variable([False] * len(raw_data), name='spikes')\n",
103 | "spikes.initializer.run()\n",
104 | "\n",
105 | "saver = tf.compat.v1.train.Saver()\n",
106 | "\n",
107 | "for i in range(1, len(raw_data)):\n",
108 | " if raw_data[i] - raw_data[i-1] > 5:\n",
109 | " spikes_val = spikes.eval()\n",
110 | " spikes_val[i] = True\n",
111 | " updater = tf.compat.v1.assign(spikes, spikes_val)\n",
112 | " updater.eval()\n",
113 | "\n",
114 | "save_path = saver.save(sess, \"./spikes.ckpt\")\n",
115 | "print(\"spikes data saved in file: %s\" % save_path)\n",
116 | "\n",
117 | "\n",
118 | "spikes = tf.Variable([False]*8, name='spikes')\n",
119 | "saver = tf.compat.v1.train.Saver()\n",
120 | "saver.restore(sess, \"./spikes.ckpt\")\n",
121 | "print(spikes.eval())\n",
122 | "sess.close()"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": []
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": []
138 | }
139 | ],
140 | "metadata": {
141 | "kernelspec": {
142 | "display_name": "Python 3",
143 | "language": "python",
144 | "name": "python3"
145 | },
146 | "language_info": {
147 | "codemirror_mode": {
148 | "name": "ipython",
149 | "version": 3
150 | },
151 | "file_extension": ".py",
152 | "mimetype": "text/x-python",
153 | "name": "python",
154 | "nbconvert_exporter": "python",
155 | "pygments_lexer": "ipython3",
156 | "version": "3.8.3"
157 | }
158 | },
159 | "nbformat": 4,
160 | "nbformat_minor": 4
161 | }
162 |
--------------------------------------------------------------------------------
/TFv2/ch02/tf_ckpts/checkpoint:
--------------------------------------------------------------------------------
1 | model_checkpoint_path: "ckpt-3"
2 | all_model_checkpoint_paths: "ckpt-1"
3 | all_model_checkpoint_paths: "ckpt-2"
4 | all_model_checkpoint_paths: "ckpt-3"
5 | all_model_checkpoint_timestamps: 1595828479.0237708
6 | all_model_checkpoint_timestamps: 1595828479.02812
7 | all_model_checkpoint_timestamps: 1595828479.032299
8 | last_preserved_timestamp: 1595828456.7734568
9 |
--------------------------------------------------------------------------------
/TFv2/ch02/tf_ckpts/ckpt-1.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/TFv2/ch02/tf_ckpts/ckpt-1.data-00000-of-00001
--------------------------------------------------------------------------------
/TFv2/ch02/tf_ckpts/ckpt-1.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/TFv2/ch02/tf_ckpts/ckpt-1.index
--------------------------------------------------------------------------------
/TFv2/ch02/tf_ckpts/ckpt-2.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/TFv2/ch02/tf_ckpts/ckpt-2.data-00000-of-00001
--------------------------------------------------------------------------------
/TFv2/ch02/tf_ckpts/ckpt-2.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/TFv2/ch02/tf_ckpts/ckpt-2.index
--------------------------------------------------------------------------------
/TFv2/ch02/tf_ckpts/ckpt-3.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/TFv2/ch02/tf_ckpts/ckpt-3.data-00000-of-00001
--------------------------------------------------------------------------------
/TFv2/ch02/tf_ckpts/ckpt-3.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/TFv2/ch02/tf_ckpts/ckpt-3.index
--------------------------------------------------------------------------------
/TFv2/ch07/Listing 7.01.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "filenames = tf.io.match_filenames_once('../data/audio_dataset/*.wav')\n",
19 | "filename_ds = tf.data.Dataset.from_tensor_slices(filenames)"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 3,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "def read_file(file):\n",
29 | " file_contents = tf.io.read_file(file)\n",
30 | " return file, file_contents"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 4,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "filename_contents_ds = filename_ds.map(read_file)"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": 5,
45 | "metadata": {},
46 | "outputs": [
47 | {
48 | "name": "stdout",
49 | "output_type": "stream",
50 | "text": [
51 | "filename b'../data/audio_dataset/cough_1.wav' \n",
52 | "filename b'../data/audio_dataset/cough_2.wav' \n",
53 | "filename b'../data/audio_dataset/scream_1.wav' \n",
54 | "filename b'../data/audio_dataset/scream_2.wav' \n",
55 | "filename b'../data/audio_dataset/scream_3.wav' \n"
56 | ]
57 | }
58 | ],
59 | "source": [
60 | "for file, file_contents in filename_contents_ds.take(5):\n",
61 | " print(\"filename %s \" % (file.numpy()))"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": null,
67 | "metadata": {},
68 | "outputs": [],
69 | "source": []
70 | }
71 | ],
72 | "metadata": {
73 | "kernelspec": {
74 | "display_name": "Python 3",
75 | "language": "python",
76 | "name": "python3"
77 | },
78 | "language_info": {
79 | "codemirror_mode": {
80 | "name": "ipython",
81 | "version": 3
82 | },
83 | "file_extension": ".py",
84 | "mimetype": "text/x-python",
85 | "name": "python",
86 | "nbconvert_exporter": "python",
87 | "pygments_lexer": "ipython3",
88 | "version": "3.8.5"
89 | }
90 | },
91 | "nbformat": 4,
92 | "nbformat_minor": 2
93 | }
94 |
--------------------------------------------------------------------------------
/TFv2/ch07/Listing 7.04.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "sys.path.append(\"../libs/basic_units/\")"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "import numpy as np\n",
28 | "import librosa\n",
29 | "import python_speech_features\n",
30 | "from basic_units import cm, inch\n",
31 | "import matplotlib.pyplot as plt\n",
32 | "from scipy.signal.windows import hann, hamming\n",
33 | "import tensorflow as tf\n",
34 | "import matplotlib.pyplot as plt"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 16,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "k = 2\n",
44 | "max_iterations = 100"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 4,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "n_mfcc = 22\n",
54 | "n_mels = 40\n",
55 | "n_fft = 16384 \n",
56 | "hop_length = 2205\n",
57 | "fmin = 0\n",
58 | "fmax = None\n",
59 | "rate = 44000"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": 5,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "def read_file(file):\n",
69 | " file_contents = tf.io.read_file(file)\n",
70 | " return file, file_contents"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": 6,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "filenames = tf.io.match_filenames_once('../data/audio_dataset/*.wav')\n",
80 | "filename_ds = tf.data.Dataset.from_tensor_slices(filenames)"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": 7,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "filename_contents_ds = filename_ds.map(read_file)"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 8,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 | "def get_next_chromagram(audio_file):\n",
99 | " print('filename %s ' % (audio_file))\n",
100 | " y, sr = librosa.load(audio_file, sr=rate)\n",
101 | " winlen=n_fft / sr\n",
102 | " winstep=hop_length/sr \n",
103 | " mfcc_speech = python_speech_features.mfcc(signal=y, samplerate=sr, winlen=winlen, winstep=winstep,\n",
104 | " numcep=n_mfcc, nfilt=n_mels, nfft=n_fft, lowfreq=fmin, highfreq=fmax,\n",
105 | " preemph=0.0, ceplifter=0, appendEnergy=False, winfunc=hamming) \n",
106 | " return mfcc_speech"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 9,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "def extract_feature_vector(chroma_data):\n",
116 | " num_samples, num_features = np.shape(chroma_data)\n",
117 | " print(\"Num features %d num samples %d \" % (num_features, num_samples))\n",
118 | " freq_vals = tf.argmax(chroma_data)\n",
119 | " hist, bins = np.histogram(freq_vals, bins=range(num_features + 1))\n",
120 | " return hist.astype(float) / num_samples"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": 10,
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "def get_dataset():\n",
130 | " filename_contents_ds_enum = filename_contents_ds.enumerate()\n",
131 | " xs = []\n",
132 | " for file_obj in filename_contents_ds_enum.as_numpy_iterator():\n",
133 | " chroma_data = get_next_chromagram(file_obj[1][0])\n",
134 | " x = [extract_feature_vector(chroma_data)]\n",
135 | " x = np.matrix(x)\n",
136 | " if len(xs) == 0:\n",
137 | " xs = x\n",
138 | " else:\n",
139 | " xs = np.vstack((xs, x))\n",
140 | " \n",
141 | " return xs"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": 11,
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "def initial_cluster_centroids(X, k):\n",
151 | " return X[0:k, :]"
152 | ]
153 | },
154 | {
155 | "cell_type": "code",
156 | "execution_count": 12,
157 | "metadata": {},
158 | "outputs": [],
159 | "source": [
160 | "def assign_cluster(X, centroids):\n",
161 | " expanded_vectors = tf.expand_dims(X, 0) # 1, 5, 12\n",
162 | " expanded_centroids = tf.expand_dims(centroids, 1) #2, 1, 12\n",
163 | " distances = tf.reduce_sum(tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2) #2, 5\n",
164 | " mins = tf.argmin(distances, 0)\n",
165 | " return mins"
166 | ]
167 | },
168 | {
169 | "cell_type": "code",
170 | "execution_count": 18,
171 | "metadata": {},
172 | "outputs": [],
173 | "source": [
174 | "def recompute_centroids(X, Y):\n",
175 | " sums = tf.math.unsorted_segment_sum(X, Y, k)\n",
176 | " counts = tf.math.unsorted_segment_sum(tf.ones_like(X), Y, k)\n",
177 | " return sums / counts"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": 20,
183 | "metadata": {},
184 | "outputs": [
185 | {
186 | "name": "stdout",
187 | "output_type": "stream",
188 | "text": [
189 | "filename b'../data/audio_dataset/cough_1.wav' \n",
190 | "Num features 22 num samples 16 \n",
191 | "filename b'../data/audio_dataset/cough_2.wav' \n",
192 | "Num features 22 num samples 25 \n",
193 | "filename b'../data/audio_dataset/scream_1.wav' \n",
194 | "Num features 22 num samples 19 \n",
195 | "filename b'../data/audio_dataset/scream_2.wav' \n",
196 | "Num features 22 num samples 43 \n",
197 | "filename b'../data/audio_dataset/scream_3.wav' \n",
198 | "Num features 22 num samples 61 \n",
199 | "tf.Tensor(\n",
200 | "[[0. 0.0625 0.125 0.1875 0. 0.\n",
201 | " 0. 0.125 0.0625 0.0625 0.125 0.1875\n",
202 | " 0.1875 0. 0.0625 0.1875 0. 0.\n",
203 | " 0. 0. 0. 0. ]\n",
204 | " [0.01572627 0.01 0.03947368 0.03451251 0.01 0.00819672\n",
205 | " 0.01 0.00409836 0. 0.01315789 0. 0.03041415\n",
206 | " 0.0379437 0.03631579 0.03631579 0.03631579 0.04528764 0.02409836\n",
207 | " 0.07357204 0.01581395 0.02409836 0.02409836]], shape=(2, 22), dtype=float64)\n"
208 | ]
209 | }
210 | ],
211 | "source": [
212 | "X = get_dataset()\n",
213 | "print(X)\n",
214 | "print(X.shape)\n",
215 | "centroids = initial_cluster_centroids(X, k)\n",
216 | "i, converged = 0, False\n",
217 | "while not converged and i < max_iterations:\n",
218 | " i += 1\n",
219 | " Y = assign_cluster(X, centroids)\n",
220 | " centroids = recompute_centroids(X, Y)\n",
221 | "print(centroids)"
222 | ]
223 | },
224 | {
225 | "cell_type": "code",
226 | "execution_count": null,
227 | "metadata": {},
228 | "outputs": [],
229 | "source": []
230 | }
231 | ],
232 | "metadata": {
233 | "kernelspec": {
234 | "display_name": "Python 3",
235 | "language": "python",
236 | "name": "python3"
237 | },
238 | "language_info": {
239 | "codemirror_mode": {
240 | "name": "ipython",
241 | "version": 3
242 | },
243 | "file_extension": ".py",
244 | "mimetype": "text/x-python",
245 | "name": "python",
246 | "nbconvert_exporter": "python",
247 | "pygments_lexer": "ipython3",
248 | "version": "3.8.5"
249 | }
250 | },
251 | "nbformat": 4,
252 | "nbformat_minor": 2
253 | }
254 |
--------------------------------------------------------------------------------
/TFv2/ch07/Listing 7.05 - 7.06.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import sys"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "sys.path.append(\"../libs/basic_units/\")"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "import numpy as np\n",
28 | "import librosa\n",
29 | "import python_speech_features\n",
30 | "from basic_units import cm, inch\n",
31 | "import matplotlib.pyplot as plt\n",
32 | "from scipy.signal.windows import hann, hamming\n",
33 | "import tensorflow as tf\n",
34 | "import matplotlib.pyplot as plt"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 4,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "k = 2\n",
44 | "max_iterations = 100\n",
45 | "segment_size = 50"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 5,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "n_mfcc = 22\n",
55 | "n_mels = 40\n",
56 | "n_fft = 16384 \n",
57 | "hop_length = 2205\n",
58 | "fmin = 0\n",
59 | "fmax = None\n",
60 | "rate = 44000"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": 6,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "def read_file(file):\n",
70 | " file_contents = tf.io.read_file(file)\n",
71 | " return file, file_contents"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 7,
77 | "metadata": {},
78 | "outputs": [],
79 | "source": [
80 | "filenames = tf.io.match_filenames_once('../data/audio_dataset/*.wav')\n",
81 | "filename_ds = tf.data.Dataset.from_tensor_slices(filenames)"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": 8,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": [
90 | "filename_contents_ds = filename_ds.map(read_file)"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": 9,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 | "def get_chromagram(audio_file):\n",
100 | " print('filename %s ' % (audio_file))\n",
101 | " y, sr = librosa.load(audio_file, sr=rate)\n",
102 | " winlen=n_fft / sr\n",
103 | " winstep=hop_length/sr \n",
104 | " mfcc_speech = python_speech_features.mfcc(signal=y, samplerate=sr, winlen=winlen, winstep=winstep,\n",
105 | " numcep=n_mfcc, nfilt=n_mels, nfft=n_fft, lowfreq=fmin, highfreq=fmax,\n",
106 | " preemph=0.0, ceplifter=0, appendEnergy=False, winfunc=hamming) \n",
107 | " return mfcc_speech"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": 10,
113 | "metadata": {},
114 | "outputs": [],
115 | "source": [
116 | "def extract_feature_vector(chroma_data):\n",
117 | " num_samples, num_features = np.shape(chroma_data)\n",
118 | " print(\"Num features %d num samples %d \" % (num_features, num_samples))\n",
119 | " freq_vals = tf.argmax(chroma_data)\n",
120 | " hist, bins = np.histogram(freq_vals, bins=range(num_features + 1))\n",
121 | " return hist.astype(float) / num_samples"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": 32,
127 | "metadata": {},
128 | "outputs": [],
129 | "source": [
130 | "def get_dataset(audio_file):\n",
131 | " chroma_data = get_chromagram(audio_file)\n",
132 | " print('chroma_data', np.shape(chroma_data))\n",
133 | " chroma_length = np.shape(chroma_data)[0]\n",
134 | " print('chroma_length', chroma_length)\n",
135 | " xs = []\n",
136 | " for i in range(chroma_length // segment_size):\n",
137 | " chroma_segment = chroma_data[i*segment_size:(i+1)*segment_size, :]\n",
138 | " x = extract_feature_vector(chroma_segment)\n",
139 | " if len(xs) == 0:\n",
140 | " xs = x\n",
141 | " else:\n",
142 | " xs = np.vstack((xs, x))\n",
143 | " return xs"
144 | ]
145 | },
146 | {
147 | "cell_type": "code",
148 | "execution_count": 33,
149 | "metadata": {},
150 | "outputs": [],
151 | "source": [
152 | "def initial_cluster_centroids(X, k):\n",
153 | " return X[0:k, :]"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": 34,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": [
162 | "def assign_cluster(X, centroids):\n",
163 | " expanded_vectors = tf.expand_dims(X, 0) # 1, 5, 12\n",
164 | " expanded_centroids = tf.expand_dims(centroids, 1) #2, 1, 12\n",
165 | " distances = tf.reduce_sum(tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2) #2, 5\n",
166 | " mins = tf.argmin(distances, 0)\n",
167 | " return mins"
168 | ]
169 | },
170 | {
171 | "cell_type": "code",
172 | "execution_count": 35,
173 | "metadata": {},
174 | "outputs": [],
175 | "source": [
176 | "def recompute_centroids(X, Y):\n",
177 | " sums = tf.math.unsorted_segment_sum(X, Y, k)\n",
178 | " counts = tf.math.unsorted_segment_sum(tf.ones_like(X), Y, k)\n",
179 | " return sums / counts"
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": 37,
185 | "metadata": {},
186 | "outputs": [
187 | {
188 | "name": "stdout",
189 | "output_type": "stream",
190 | "text": [
191 | "filename ../data/TalkingMachinesPodcast.wav \n",
192 | "chroma_data (626, 22)\n",
193 | "chroma_length 626\n",
194 | "Num features 22 num samples 50 \n",
195 | "Num features 22 num samples 50 \n",
196 | "Num features 22 num samples 50 \n",
197 | "Num features 22 num samples 50 \n",
198 | "Num features 22 num samples 50 \n",
199 | "Num features 22 num samples 50 \n",
200 | "Num features 22 num samples 50 \n",
201 | "Num features 22 num samples 50 \n",
202 | "Num features 22 num samples 50 \n",
203 | "Num features 22 num samples 50 \n",
204 | "Num features 22 num samples 50 \n",
205 | "Num features 22 num samples 50 \n",
206 | "(12, 22)\n",
207 | "iteration 50\n",
208 | "iteration 100\n",
209 | "0.0m 0.0s 0\n",
210 | "0.0m 5.0s 1\n",
211 | "0.0m 10.0s 0\n",
212 | "0.0m 15.0s 0\n",
213 | "0.0m 20.0s 1\n",
214 | "0.0m 25.0s 0\n",
215 | "0.0m 30.0s 0\n",
216 | "0.0m 35.0s 0\n",
217 | "0.0m 40.0s 0\n",
218 | "0.0m 45.0s 0\n",
219 | "0.0m 50.0s 0\n",
220 | "0.0m 55.0s 0\n"
221 | ]
222 | }
223 | ],
224 | "source": [
225 | "X = get_dataset('../data/TalkingMachinesPodcast.wav')\n",
226 | "print(np.shape(X))\n",
227 | "centroids = initial_cluster_centroids(X, k)\n",
228 | "i, converged = 0, False\n",
229 | "while not converged and i < max_iterations:\n",
230 | " i += 1\n",
231 | " Y = assign_cluster(X, centroids)\n",
232 | " centroids = recompute_centroids(X, Y)\n",
233 | " if i % 50 == 0:\n",
234 | " print('iteration', i)\n",
235 | "\n",
236 | "segments = Y\n",
237 | "for i in range(len(segments)):\n",
238 | " seconds = (i * segment_size) / float(10)\n",
239 | " min, sec = divmod(seconds, 60)\n",
240 | " time_str = '{}m {}s'.format(min, sec)\n",
241 | " print(time_str, segments[i].numpy())"
242 | ]
243 | },
244 | {
245 | "cell_type": "code",
246 | "execution_count": null,
247 | "metadata": {},
248 | "outputs": [],
249 | "source": []
250 | }
251 | ],
252 | "metadata": {
253 | "kernelspec": {
254 | "display_name": "Python 3",
255 | "language": "python",
256 | "name": "python3"
257 | },
258 | "language_info": {
259 | "codemirror_mode": {
260 | "name": "ipython",
261 | "version": 3
262 | },
263 | "file_extension": ".py",
264 | "mimetype": "text/x-python",
265 | "name": "python",
266 | "nbconvert_exporter": "python",
267 | "pygments_lexer": "ipython3",
268 | "version": "3.8.5"
269 | }
270 | },
271 | "nbformat": 4,
272 | "nbformat_minor": 2
273 | }
274 |
--------------------------------------------------------------------------------
/TFv2/ch09/Listing 9.07 - 9.11.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import tensorflow as tf "
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 20,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "class HMM(object):\n",
20 | " def __init__(self, initial_prob, trans_prob, obs_prob):\n",
21 | " self.N = np.size(initial_prob)\n",
22 | " self.initial_prob = initial_prob\n",
23 | " self.trans_prob = trans_prob\n",
24 | " self.emission = obs_prob\n",
25 | " assert self.initial_prob.shape == (self.N, 1)\n",
26 | " assert self.trans_prob.shape == (self.N, self.N)\n",
27 | " assert obs_prob.shape[0] == self.N\n",
28 | " self.obs_idx = 0\n",
29 | " self.fwd = tf.cast(0., tf.double)\n",
30 | " self.viterbi = None\n",
31 | " \n",
32 | " def get_emission(self, obs_idx):\n",
33 | " slice_location = [0, obs_idx]\n",
34 | " num_rows = tf.shape(self.emission)[0]\n",
35 | " slice_shape = [num_rows, 1]\n",
36 | " return tf.slice(self.emission, slice_location, slice_shape)\n",
37 | " \n",
38 | " def forward_init_op(self, obs_idx):\n",
39 | " self.obs_idx = obs_idx\n",
40 | " obs_prob = self.get_emission(self.obs_idx)\n",
41 | " fwd = tf.multiply(self.initial_prob, obs_prob)\n",
42 | " return fwd\n",
43 | " \n",
44 | " def forward_op(self, obs_idx, fwd):\n",
45 | " self.obs_idx = obs_idx\n",
46 | " self.fwd = fwd\n",
47 | " transitions = tf.matmul(self.fwd,\n",
48 | " tf.transpose(self.get_emission(self.obs_idx)))\n",
49 | " weighted_transitions = transitions * self.trans_prob\n",
50 | " fwd = tf.reduce_sum(weighted_transitions, 0)\n",
51 | " return tf.reshape(fwd, tf.shape(self.fwd))\n",
52 | " \n",
53 | " def decode_op(self, obs_idx, viterbi):\n",
54 | " self.viterbi = viterbi\n",
55 | " self.obs_idx = obs_idx\n",
56 | " transitions = tf.matmul(viterbi,\n",
57 | " tf.transpose(self.get_emission(self.obs_idx)))\n",
58 | " weighted_transitions = transitions * self.trans_prob\n",
59 | " viterbi = tf.reduce_max(weighted_transitions, 0)\n",
60 | " return tf.reshape(viterbi, tf.shape(self.viterbi))\n",
61 | "\n",
62 | " def backpt_op(self, viterbi):\n",
63 | " back_transitions = tf.matmul(self.viterbi, np.ones((1, self.N)))\n",
64 | " weighted_back_transitions = back_transitions * self.trans_prob\n",
65 | " return tf.argmax(weighted_back_transitions, 0) "
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 21,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "def forward_algorithm(sess, hmm, observations):\n",
75 | " fwd = hmm.forward_init_op(observations[0])\n",
76 | " for t in range(1, len(observations)):\n",
77 | " fwd = hmm.forward_op(observations[t],fwd)\n",
78 | " prob = tf.reduce_sum(fwd)\n",
79 | " return prob"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": 22,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "def viterbi_decode(hmm, observations):\n",
89 | " viterbi = hmm.forward_init_op(observations[0])\n",
90 | " backpts = np.ones((hmm.N, len(observations)), 'int32') * -1\n",
91 | " for t in range(1, len(observations)):\n",
92 | " viterbi = hmm.decode_op(observations[t], viterbi)\n",
93 | " backpt = hmm.backpt_op(viterbi) \n",
94 | " backpts[:, t] = backpt\n",
95 | " \n",
96 | " tokens = [tf.math.argmax(viterbi[:, -1], 0)]\n",
97 | " for i in range(len(observations) - 1, 0, -1):\n",
98 | " tokens.append(backpts[tokens[-1], i])\n",
99 | " return tokens[::-1]\n"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": 25,
105 | "metadata": {},
106 | "outputs": [
107 | {
108 | "name": "stdout",
109 | "output_type": "stream",
110 | "text": [
111 | "Most likely hidden states are [1, 0, 0, 0, <tf.Tensor: shape=(), dtype=int64, numpy=0>]\n"
112 | ]
113 | }
114 | ],
115 | "source": [
116 | "initial_prob = np.array([[0.6],[0.4]])\n",
117 | "trans_prob = np.array([[0.7, 0.3],\n",
118 | " [0.4, 0.6]])\n",
119 | "obs_prob = np.array([[0.1, 0.4, 0.5],\n",
120 | " [0.6, 0.3, 0.1]])\n",
121 | "hmm = HMM(initial_prob=initial_prob, trans_prob=trans_prob, obs_prob=obs_prob)\n",
122 | "observations = [0, 1, 1, 2, 1]\n",
123 | "seq = viterbi_decode(hmm, observations)\n",
124 | "print('Most likely hidden states are {}'.format(seq))\n"
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": null,
130 | "metadata": {},
131 | "outputs": [],
132 | "source": []
133 | }
134 | ],
135 | "metadata": {
136 | "kernelspec": {
137 | "display_name": "Python 3",
138 | "language": "python",
139 | "name": "python3"
140 | },
141 | "language_info": {
142 | "codemirror_mode": {
143 | "name": "ipython",
144 | "version": 3
145 | },
146 | "file_extension": ".py",
147 | "mimetype": "text/x-python",
148 | "name": "python",
149 | "nbconvert_exporter": "python",
150 | "pygments_lexer": "ipython3",
151 | "version": "3.8.5"
152 | }
153 | },
154 | "nbformat": 4,
155 | "nbformat_minor": 2
156 | }
157 |
--------------------------------------------------------------------------------
/TFv2/ch16/Listing 16.01 - 16.06.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import tensorflow as tf"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "optimizer = tf.keras.optimizers.Adam()"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 3,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "def loss(inputs, outputs):\n",
29 | " return tf.reduce_mean(tf.square(inputs - outputs))"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 4,
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "def train_step(model, inputs, outputs):\n",
39 | " with tf.GradientTape() as t:\n",
40 | " current_loss = loss(model(inputs), outputs)\n",
41 | " \n",
42 | " grads = t.gradient(current_loss, model.trainable_variables)\n",
43 | " optimizer.apply_gradients(zip(grads,model.trainable_variables))\n",
44 | " return current_loss"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 5,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "class SeriesPredictor(tf.keras.Model):\n",
54 | " def __init__(self, input_dim, seq_size, hidden_dim=10):\n",
55 | " super(SeriesPredictor, self).__init__()\n",
56 | " self.input_dim = input_dim\n",
57 | " self.seq_size = seq_size\n",
58 | " self.hidden_dim = hidden_dim\n",
59 | " \n",
60 | " self.cell = tf.keras.layers.SimpleRNNCell(self.hidden_dim)\n",
61 | " self.rnn = tf.keras.layers.RNN(self.cell)\n",
62 | " self.fc = tf.keras.layers.Dense(self.seq_size)\n",
63 | " \n",
64 | " def call(self, x): \n",
65 | " x = self.rnn(x)\n",
66 | " x = self.fc(x)\n",
67 | " \n",
68 | " return x"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 6,
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "model = SeriesPredictor(input_dim=1, seq_size=4, hidden_dim=10)"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": 7,
83 | "metadata": {},
84 | "outputs": [],
85 | "source": [
86 | "ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=optimizer, \n",
87 | " model=model)\n",
88 | "manager = tf.train.CheckpointManager(ckpt, '../models/ch16-model.ckpt', max_to_keep=3)"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 8,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "def train(model, inputs, outputs):\n",
98 | " for i in range(1000):\n",
99 | " err = train_step(model, inputs, outputs)\n",
100 | " ckpt.step.assign_add(1)\n",
101 | " if i % 100 == 0: \n",
102 | " save_path = manager.save()\n",
103 | " print(\"Saved checkpoint for step {}: {}\".format(int(ckpt.step), save_path))\n",
104 | " print(\"loss {:1.2f}\".format(err.numpy()))\n",
105 | " \n",
106 | " return save_path"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 9,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "def test(model, inputs):\n",
116 | " ckpt.restore(save_path).assert_consumed()\n",
117 | " print(\"Model restored.\")\n",
118 | " outputs = model(inputs)\n",
119 | " print(outputs.numpy())"
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": 10,
125 | "metadata": {},
126 | "outputs": [
127 | {
128 | "name": "stdout",
129 | "output_type": "stream",
130 | "text": [
131 | "(3, 4, 1)\n",
132 | "(3, 4)\n",
133 | "(2, 4, 1)\n"
134 | ]
135 | }
136 | ],
137 | "source": [
138 | "train_x = np.asarray([[[1], [2], [5], [6]],\n",
139 | " [[5], [7], [7], [8]],\n",
140 | " [[3], [4], [5], [7]]], dtype=np.float32)\n",
141 | "train_y = np.asarray([[1, 3, 7, 11],\n",
142 | " [5, 12, 14, 15],\n",
143 | " [3, 7, 9, 12]], dtype=np.float32)\n",
144 | "\n",
145 | "test_x = np.asarray([[[1], [2], [3], [4]],\n",
146 | " [[4], [5], [6], [7]]], dtype=np.float32)\n",
147 | "\n",
148 | "print(np.shape(train_x))\n",
149 | "print(np.shape(train_y))\n",
150 | "print(np.shape(test_x))\n"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": 11,
156 | "metadata": {},
157 | "outputs": [
158 | {
159 | "name": "stdout",
160 | "output_type": "stream",
161 | "text": [
162 | "Saved checkpoint for step 2: ../models/ch16-model.ckpt/ckpt-1\n",
163 | "loss 87.04\n",
164 | "Saved checkpoint for step 102: ../models/ch16-model.ckpt/ckpt-2\n",
165 | "loss 68.47\n",
166 | "Saved checkpoint for step 202: ../models/ch16-model.ckpt/ckpt-3\n",
167 | "loss 53.56\n",
168 | "Saved checkpoint for step 302: ../models/ch16-model.ckpt/ckpt-4\n",
169 | "loss 42.57\n",
170 | "Saved checkpoint for step 402: ../models/ch16-model.ckpt/ckpt-5\n",
171 | "loss 34.20\n",
172 | "Saved checkpoint for step 502: ../models/ch16-model.ckpt/ckpt-6\n",
173 | "loss 27.70\n",
174 | "Saved checkpoint for step 602: ../models/ch16-model.ckpt/ckpt-7\n",
175 | "loss 22.65\n",
176 | "Saved checkpoint for step 702: ../models/ch16-model.ckpt/ckpt-8\n",
177 | "loss 18.73\n",
178 | "Saved checkpoint for step 802: ../models/ch16-model.ckpt/ckpt-9\n",
179 | "loss 15.71\n",
180 | "Saved checkpoint for step 902: ../models/ch16-model.ckpt/ckpt-10\n",
181 | "loss 13.40\n"
182 | ]
183 | }
184 | ],
185 | "source": [
186 | "save_path = train(model, train_x, train_y)"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": 12,
192 | "metadata": {},
193 | "outputs": [
194 | {
195 | "name": "stdout",
196 | "output_type": "stream",
197 | "text": [
198 | "Model restored.\n",
199 | "[[2.9591253 6.653071 8.215342 7.891247 ]\n",
200 | " [2.9956038 6.7035503 8.27158 7.931595 ]]\n"
201 | ]
202 | }
203 | ],
204 | "source": [
205 | "test(model, test_x)"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": null,
211 | "metadata": {},
212 | "outputs": [],
213 | "source": []
214 | }
215 | ],
216 | "metadata": {
217 | "kernelspec": {
218 | "display_name": "Python 3",
219 | "language": "python",
220 | "name": "python3"
221 | },
222 | "language_info": {
223 | "codemirror_mode": {
224 | "name": "ipython",
225 | "version": 3
226 | },
227 | "file_extension": ".py",
228 | "mimetype": "text/x-python",
229 | "name": "python",
230 | "nbconvert_exporter": "python",
231 | "pygments_lexer": "ipython3",
232 | "version": "3.8.5"
233 | }
234 | },
235 | "nbformat": 4,
236 | "nbformat_minor": 2
237 | }
238 |
--------------------------------------------------------------------------------
/TFv2/ch18/Listing 18.05 - 18.08.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "embeddings_0d = tf.constant([17, 22, 35, 51])"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "embeddings_4d = tf.constant([[1, 0, 0, 0],\n",
28 | " [0, 1, 0, 0],\n",
29 | " [0, 0, 1, 0],\n",
30 | " [0, 0, 0, 1]])\n"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 4,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "embeddings_2x2d = tf.constant([[[1, 0], [0, 0]],\n",
40 | " [[0, 1], [0, 0]],\n",
41 | " [[0, 0], [1, 0]],\n",
42 | " [[0, 0], [0, 1]]])\n"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": 5,
48 | "metadata": {},
49 | "outputs": [],
50 | "source": [
51 | "ids = tf.constant([1, 0, 2])"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 6,
57 | "metadata": {},
58 | "outputs": [
59 | {
60 | "name": "stdout",
61 | "output_type": "stream",
62 | "text": [
63 | "tf.Tensor([22 17 35], shape=(3,), dtype=int32)\n",
64 | "tf.Tensor(\n",
65 | "[[0 1 0 0]\n",
66 | " [1 0 0 0]\n",
67 | " [0 0 1 0]], shape=(3, 4), dtype=int32)\n",
68 | "tf.Tensor(\n",
69 | "[[[0 1]\n",
70 | " [0 0]]\n",
71 | "\n",
72 | " [[1 0]\n",
73 | " [0 0]]\n",
74 | "\n",
75 | " [[0 0]\n",
76 | " [1 0]]], shape=(3, 2, 2), dtype=int32)\n"
77 | ]
78 | }
79 | ],
80 | "source": [
81 | "lookup_0d = tf.nn.embedding_lookup(embeddings_0d, ids)\n",
82 | "print(lookup_0d)\n",
83 | "\n",
84 | "lookup_4d = tf.nn.embedding_lookup(embeddings_4d, ids)\n",
85 | "print(lookup_4d)\n",
86 | "\n",
87 | "lookup_2x2d = tf.nn.embedding_lookup(embeddings_2x2d, ids)\n",
88 | "print(lookup_2x2d)"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": []
97 | }
98 | ],
99 | "metadata": {
100 | "kernelspec": {
101 | "display_name": "Python 3",
102 | "language": "python",
103 | "name": "python3"
104 | },
105 | "language_info": {
106 | "codemirror_mode": {
107 | "name": "ipython",
108 | "version": 3
109 | },
110 | "file_extension": ".py",
111 | "mimetype": "text/x-python",
112 | "name": "python",
113 | "nbconvert_exporter": "python",
114 | "pygments_lexer": "ipython3",
115 | "version": "3.8.5"
116 | }
117 | },
118 | "nbformat": 4,
119 | "nbformat_minor": 2
120 | }
121 |
--------------------------------------------------------------------------------
/build_TFv2_environment.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # rm -f requirements.txt
3 | # pipenv run pip freeze | grep -v horovod > requirements.txt
4 | docker build --rm --pull --tag chrismattmann/mltf2:tf2 -f Dockerfile-TFv2 .
5 |
6 |
--------------------------------------------------------------------------------
/build_environment.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # rm -f requirements.txt
3 | # pipenv run pip freeze | grep -v horovod > requirements.txt
4 | docker build --rm --pull --tag chrismattmann/mltf2:latest -f Dockerfile .
5 |
--------------------------------------------------------------------------------
/ch02/Listing 2.01.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from random import sample\n",
10 | "import numpy"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "revenue = 0\n",
20 | "prices = numpy.random.uniform(0, 10000.0, size=(100))\n",
21 | "amounts = sample(range(1,101), 100)"
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {},
27 | "source": [
28 | "# By hand using Python Native\n",
29 | "In this example we will use regular python no libraries to do the dot product"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 3,
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "for price, amount in zip(prices, amounts):\n",
39 | " revenue += price * amount"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "metadata": {},
45 | "source": [
46 | "# Use NumPy and Dot instead of by Hand\n",
47 | "In this example we will use numpy.dot and show how easy it is"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": 4,
53 | "metadata": {},
54 | "outputs": [],
55 | "source": [
56 | "revenue2 = numpy.dot(prices,amounts)"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": 5,
62 | "metadata": {},
63 | "outputs": [
64 | {
65 | "name": "stdout",
66 | "output_type": "stream",
67 | "text": [
68 | "$22,170,528.29\n",
69 | "$22,170,528.29\n"
70 | ]
71 | }
72 | ],
73 | "source": [
74 | "print('${:,.2f}'.format(revenue))\n",
75 | "print('${:,.2f}'.format(revenue2))"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {},
82 | "outputs": [],
83 | "source": []
84 | }
85 | ],
86 | "metadata": {
87 | "kernelspec": {
88 | "display_name": "Python 3",
89 | "language": "python",
90 | "name": "python3"
91 | },
92 | "language_info": {
93 | "codemirror_mode": {
94 | "name": "ipython",
95 | "version": 3
96 | },
97 | "file_extension": ".py",
98 | "mimetype": "text/x-python",
99 | "name": "python",
100 | "nbconvert_exporter": "python",
101 | "pygments_lexer": "ipython3",
102 | "version": "3.7.0b1"
103 | }
104 | },
105 | "nbformat": 4,
106 | "nbformat_minor": 2
107 | }
108 |
--------------------------------------------------------------------------------
/ch02/Listing 2.03.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "import numpy as np"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "m1 = [[1.0, 2.0], [3.0, 4.0]]\n",
28 | "m2 = np.array([[1.0,2.0],\n",
29 | " [3.0,4.0]], dtype=np.float32)\n",
30 | "m3 = tf.constant([[1.0, 2.0],\n",
31 | " [3.0, 4.0]])\n"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 4,
37 | "metadata": {},
38 | "outputs": [
39 | {
40 | "name": "stdout",
41 | "output_type": "stream",
42 | "text": [
43 | "\n",
44 | "\n",
45 | "\n"
46 | ]
47 | }
48 | ],
49 | "source": [
50 | "print(type(m1))\n",
51 | "print(type(m2))\n",
52 | "print(type(m3))"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": 5,
58 | "metadata": {},
59 | "outputs": [],
60 | "source": [
61 | "t1 = tf.convert_to_tensor(m1, dtype=tf.float32)\n",
62 | "t2 = tf.convert_to_tensor(m2, dtype=tf.float32)\n",
63 | "t3 = tf.convert_to_tensor(m3, dtype=tf.float32)"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": 6,
69 | "metadata": {},
70 | "outputs": [
71 | {
72 | "name": "stdout",
73 | "output_type": "stream",
74 | "text": [
75 | "\n",
76 | "\n",
77 | "\n"
78 | ]
79 | }
80 | ],
81 | "source": [
82 | "print(type(t1))\n",
83 | "print(type(t2))\n",
84 | "print(type(t3))"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": null,
90 | "metadata": {},
91 | "outputs": [],
92 | "source": []
93 | }
94 | ],
95 | "metadata": {
96 | "kernelspec": {
97 | "display_name": "Python 3",
98 | "language": "python",
99 | "name": "python3"
100 | },
101 | "language_info": {
102 | "codemirror_mode": {
103 | "name": "ipython",
104 | "version": 3
105 | },
106 | "file_extension": ".py",
107 | "mimetype": "text/x-python",
108 | "name": "python",
109 | "nbconvert_exporter": "python",
110 | "pygments_lexer": "ipython3",
111 | "version": "3.7.0b1"
112 | }
113 | },
114 | "nbformat": 4,
115 | "nbformat_minor": 2
116 | }
117 |
--------------------------------------------------------------------------------
/ch02/Listing 2.04 - 2.06.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "m1 = tf.constant([[1.,2.]])"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "m2 = tf.constant([[1],[2]])"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 4,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "m3 = tf.constant([ [[1,2],\n",
37 | " [3,4],\n",
38 | " [5,6]],\n",
39 | " [[7,8],\n",
40 | " [9,10],\n",
41 | " [11,12]]\n",
42 | " ])"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": 5,
48 | "metadata": {},
49 | "outputs": [
50 | {
51 | "name": "stdout",
52 | "output_type": "stream",
53 | "text": [
54 | "Tensor(\"Const:0\", shape=(1, 2), dtype=float32)\n",
55 | "Tensor(\"Const_1:0\", shape=(2, 1), dtype=int32)\n",
56 | "Tensor(\"Const_2:0\", shape=(2, 3, 2), dtype=int32)\n"
57 | ]
58 | }
59 | ],
60 | "source": [
61 | "print(m1)\n",
62 | "print(m2)\n",
63 | "print(m3)"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": 6,
69 | "metadata": {},
70 | "outputs": [
71 | {
72 | "name": "stdout",
73 | "output_type": "stream",
74 | "text": [
75 | "Tensor(\"Neg:0\", shape=(1, 2), dtype=float32)\n"
76 | ]
77 | }
78 | ],
79 | "source": [
80 | "negMatrix = tf.negative(m1)\n",
81 | "print(negMatrix)\n"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": 7,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": [
90 | "with tf.Session() as sess:\n",
91 | " result = sess.run(negMatrix)\n"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": 8,
97 | "metadata": {},
98 | "outputs": [
99 | {
100 | "name": "stdout",
101 | "output_type": "stream",
102 | "text": [
103 | "[[-1. -2.]]\n"
104 | ]
105 | }
106 | ],
107 | "source": [
108 | "print(result)"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {},
115 | "outputs": [],
116 | "source": []
117 | }
118 | ],
119 | "metadata": {
120 | "kernelspec": {
121 | "display_name": "Python 3",
122 | "language": "python",
123 | "name": "python3"
124 | },
125 | "language_info": {
126 | "codemirror_mode": {
127 | "name": "ipython",
128 | "version": 3
129 | },
130 | "file_extension": ".py",
131 | "mimetype": "text/x-python",
132 | "name": "python",
133 | "nbconvert_exporter": "python",
134 | "pygments_lexer": "ipython3",
135 | "version": "3.7.0b1"
136 | }
137 | },
138 | "nbformat": 4,
139 | "nbformat_minor": 2
140 | }
141 |
--------------------------------------------------------------------------------
/ch02/Listing 2.07 - 2.08.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "sess = tf.InteractiveSession()"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "x = tf.constant([[1.,2.]])"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 4,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "negMatrix = tf.negative(x)"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 5,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "result = negMatrix.eval()"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 6,
51 | "metadata": {},
52 | "outputs": [
53 | {
54 | "name": "stdout",
55 | "output_type": "stream",
56 | "text": [
57 | "[[-1. -2.]]\n"
58 | ]
59 | }
60 | ],
61 | "source": [
62 | "print(result)"
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": 7,
68 | "metadata": {},
69 | "outputs": [],
70 | "source": [
71 | "sess.close()"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": 8,
77 | "metadata": {},
78 | "outputs": [
79 | {
80 | "name": "stdout",
81 | "output_type": "stream",
82 | "text": [
83 | "Device mapping:\n",
84 | "/job:localhost/replica:0/task:0/device:XLA_CPU:0 -> device: XLA_CPU device\n",
85 | "\n"
86 | ]
87 | }
88 | ],
89 | "source": [
90 | "with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:\n",
91 | " options = tf.RunOptions(output_partition_graphs=True)\n",
92 | " metadata = tf.RunMetadata()\n",
93 | " result = sess.run(negMatrix,options=options, run_metadata=metadata)"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": 9,
99 | "metadata": {},
100 | "outputs": [
101 | {
102 | "name": "stdout",
103 | "output_type": "stream",
104 | "text": [
105 | "[[-1. -2.]]\n"
106 | ]
107 | }
108 | ],
109 | "source": [
110 | "print(result)"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": 10,
116 | "metadata": {},
117 | "outputs": [
118 | {
119 | "name": "stdout",
120 | "output_type": "stream",
121 | "text": [
122 | "[node {\n",
123 | " name: \"_retval_Neg_0_0\"\n",
124 | " op: \"_Retval\"\n",
125 | " input: \"Neg/_0__cf__1\"\n",
126 | " device: \"/job:localhost/replica:0/task:0/device:CPU:0\"\n",
127 | " attr {\n",
128 | " key: \"T\"\n",
129 | " value {\n",
130 | " type: DT_FLOAT\n",
131 | " }\n",
132 | " }\n",
133 | " attr {\n",
134 | " key: \"index\"\n",
135 | " value {\n",
136 | " i: 0\n",
137 | " }\n",
138 | " }\n",
139 | " experimental_debug_info {\n",
140 | " original_node_names: \"_retval_Neg_0_0\"\n",
141 | " }\n",
142 | "}\n",
143 | "node {\n",
144 | " name: \"Neg/_0__cf__1\"\n",
145 | " op: \"Const\"\n",
146 | " device: \"/job:localhost/replica:0/task:0/device:CPU:0\"\n",
147 | " attr {\n",
148 | " key: \"dtype\"\n",
149 | " value {\n",
150 | " type: DT_FLOAT\n",
151 | " }\n",
152 | " }\n",
153 | " attr {\n",
154 | " key: \"value\"\n",
155 | " value {\n",
156 | " tensor {\n",
157 | " dtype: DT_FLOAT\n",
158 | " tensor_shape {\n",
159 | " dim {\n",
160 | " size: 1\n",
161 | " }\n",
162 | " dim {\n",
163 | " size: 2\n",
164 | " }\n",
165 | " }\n",
166 | " tensor_content: \"\\000\\000\\200\\277\\000\\000\\000\\300\"\n",
167 | " }\n",
168 | " }\n",
169 | " }\n",
170 | "}\n",
171 | "library {\n",
172 | "}\n",
173 | "versions {\n",
174 | " producer: 134\n",
175 | "}\n",
176 | "]\n"
177 | ]
178 | }
179 | ],
180 | "source": [
181 | "print(metadata.partition_graphs)"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {},
188 | "outputs": [],
189 | "source": []
190 | }
191 | ],
192 | "metadata": {
193 | "kernelspec": {
194 | "display_name": "Python 3",
195 | "language": "python",
196 | "name": "python3"
197 | },
198 | "language_info": {
199 | "codemirror_mode": {
200 | "name": "ipython",
201 | "version": 3
202 | },
203 | "file_extension": ".py",
204 | "mimetype": "text/x-python",
205 | "name": "python",
206 | "nbconvert_exporter": "python",
207 | "pygments_lexer": "ipython3",
208 | "version": "3.7.0b1"
209 | }
210 | },
211 | "nbformat": 4,
212 | "nbformat_minor": 2
213 | }
214 |
--------------------------------------------------------------------------------
/ch02/Listing 2.09.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "sess = tf.InteractiveSession()"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "raw_data = [1.,2.,8.,-1.,0.,5.5,6.,13]"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 4,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "spike = tf.Variable(False)"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 5,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "spike.initializer.run()"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 6,
51 | "metadata": {},
52 | "outputs": [
53 | {
54 | "name": "stdout",
55 | "output_type": "stream",
56 | "text": [
57 | "Spike False\n",
58 | "Spike True\n",
59 | "Spike False\n",
60 | "Spike False\n",
61 | "Spike True\n",
62 | "Spike False\n",
63 | "Spike True\n"
64 | ]
65 | }
66 | ],
67 | "source": [
68 | "for i in range(1, len(raw_data)):\n",
69 | " if raw_data[i] - raw_data[i-1] > 5:\n",
70 | " updater = tf.assign(spike, True)\n",
71 | " updater.eval()\n",
72 | " else:\n",
73 | " tf.assign(spike, False).eval()\n",
74 | " print(\"Spike\", spike.eval())\n"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": 7,
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "sess.close()"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": null,
89 | "metadata": {},
90 | "outputs": [],
91 | "source": []
92 | }
93 | ],
94 | "metadata": {
95 | "kernelspec": {
96 | "display_name": "Python 3",
97 | "language": "python",
98 | "name": "python3"
99 | },
100 | "language_info": {
101 | "codemirror_mode": {
102 | "name": "ipython",
103 | "version": 3
104 | },
105 | "file_extension": ".py",
106 | "mimetype": "text/x-python",
107 | "name": "python",
108 | "nbconvert_exporter": "python",
109 | "pygments_lexer": "ipython3",
110 | "version": "3.7.0b1"
111 | }
112 | },
113 | "nbformat": 4,
114 | "nbformat_minor": 2
115 | }
116 |
--------------------------------------------------------------------------------
/ch02/Listing 2.10.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "sess = tf.InteractiveSession()"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "raw_data = [1.,2.,8.,-1.,0.,5.5,6.,13]"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 4,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "spikes = tf.Variable([False] * len(raw_data), name='spikes')"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 5,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "spikes.initializer.run()"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 6,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "saver = tf.train.Saver()"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": 7,
60 | "metadata": {},
61 | "outputs": [
62 | {
63 | "name": "stdout",
64 | "output_type": "stream",
65 | "text": [
66 | "spikes data saved in file: ../models/spikes.ckpt\n"
67 | ]
68 | }
69 | ],
70 | "source": [
71 | "for i in range(1, len(raw_data)):\n",
72 | " if raw_data[i] - raw_data[i-1] > 5:\n",
73 | " spikes_val = spikes.eval()\n",
74 | " spikes_val[i] = True\n",
75 | " updater = tf.assign(spikes, spikes_val)\n",
76 | " updater.eval()\n",
77 | "\n",
78 | "save_path = saver.save(sess, \"../models/spikes.ckpt\")\n",
79 | "print(\"spikes data saved in file: %s\" % save_path)\n",
80 | "\n",
81 | "sess.close()"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": null,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": []
90 | }
91 | ],
92 | "metadata": {
93 | "kernelspec": {
94 | "display_name": "Python 3",
95 | "language": "python",
96 | "name": "python3"
97 | },
98 | "language_info": {
99 | "codemirror_mode": {
100 | "name": "ipython",
101 | "version": 3
102 | },
103 | "file_extension": ".py",
104 | "mimetype": "text/x-python",
105 | "name": "python",
106 | "nbconvert_exporter": "python",
107 | "pygments_lexer": "ipython3",
108 | "version": "3.7.0b1"
109 | }
110 | },
111 | "nbformat": 4,
112 | "nbformat_minor": 2
113 | }
114 |
--------------------------------------------------------------------------------
/ch02/Listing 2.11.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf\n",
10 | "sess = tf.InteractiveSession()"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "spikes = tf.Variable([False]*8, name='spikes')\n",
20 | "saver = tf.train.Saver()"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 3,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "name": "stdout",
30 | "output_type": "stream",
31 | "text": [
32 | "[False False True False False True False True]\n"
33 | ]
34 | }
35 | ],
36 | "source": [
37 | "saver.restore(sess, \"../models/spikes.ckpt\")\n",
38 | "print(spikes.eval())"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": 4,
44 | "metadata": {},
45 | "outputs": [],
46 | "source": [
47 | "sess.close()"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "metadata": {},
54 | "outputs": [],
55 | "source": []
56 | }
57 | ],
58 | "metadata": {
59 | "kernelspec": {
60 | "display_name": "Python 3",
61 | "language": "python",
62 | "name": "python3"
63 | },
64 | "language_info": {
65 | "codemirror_mode": {
66 | "name": "ipython",
67 | "version": 3
68 | },
69 | "file_extension": ".py",
70 | "mimetype": "text/x-python",
71 | "name": "python",
72 | "nbconvert_exporter": "python",
73 | "pygments_lexer": "ipython3",
74 | "version": "3.7.0b1"
75 | }
76 | },
77 | "nbformat": 4,
78 | "nbformat_minor": 2
79 | }
80 |
--------------------------------------------------------------------------------
/ch02/Listing 2.14.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf\n",
10 | "import numpy as np"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "raw_data = np.random.normal(10, 1, 100)\n",
20 | "alpha = tf.constant(0.05)"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 3,
26 | "metadata": {},
27 | "outputs": [],
28 | "source": [
29 | "curr_value = tf.placeholder(tf.float32)\n",
30 | "prev_avg = tf.Variable(0.)"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 4,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "update_avg = alpha * curr_value + (1 - alpha) * prev_avg"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": 5,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "init = tf.global_variables_initializer()"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": 6,
54 | "metadata": {},
55 | "outputs": [
56 | {
57 | "name": "stdout",
58 | "output_type": "stream",
59 | "text": [
60 | "8.73560672924146 0.43678036\n",
61 | "8.816243303235256 0.85575354\n",
62 | "12.405289777194788 1.4332304\n",
63 | "10.699670612166225 1.8965523\n",
64 | "10.936631979179092 2.3485563\n",
65 | "12.099988140354165 2.8361278\n",
66 | "10.588867914550402 3.223765\n",
67 | "10.62633022504505 3.593893\n",
68 | "10.478497797304229 3.9381232\n",
69 | "9.363736420693844 4.209404\n",
70 | "9.278142332845556 4.462841\n",
71 | "10.422899820302064 4.7608438\n",
72 | "8.642639772623923 4.954933\n",
73 | "8.89964117507796 5.1521683\n",
74 | "8.657434166974753 5.3274317\n",
75 | "10.639771508843575 5.5930486\n",
76 | "11.06625862838808 5.8667088\n",
77 | "10.827408382837612 6.1147437\n",
78 | "9.40412603020006 6.2792125\n",
79 | "9.467007455027906 6.4386024\n",
80 | "9.99858211164401 6.616601\n",
81 | "11.291436923730766 6.8503428\n",
82 | "10.55297443018526 7.0354743\n",
83 | "8.898007635098999 7.128601\n",
84 | "12.857690336711048 7.4150558\n",
85 | "8.576427749134089 7.4731245\n",
86 | "8.9422753327299 7.546582\n",
87 | "10.234056069810322 7.680956\n",
88 | "8.410075733975127 7.7174115\n",
89 | "10.582903024756128 7.8606863\n",
90 | "10.445961805998158 7.98995\n",
91 | "10.945585476441495 8.137732\n",
92 | "10.35106223452857 8.248398\n",
93 | "12.395500922218694 8.455753\n",
94 | "10.366463596670199 8.551289\n",
95 | "9.724900008552128 8.609969\n",
96 | "9.21384006011963 8.640163\n",
97 | "8.20680098817731 8.618495\n",
98 | "11.90201302386308 8.782671\n",
99 | "10.564847759296384 8.871779\n",
100 | "9.451275924574198 8.900754\n",
101 | "9.806911784521365 8.946062\n",
102 | "10.497355630423517 9.023627\n",
103 | "8.250609281395636 8.984977\n",
104 | "10.780382040311762 9.074747\n",
105 | "10.475183908922617 9.144769\n",
106 | "9.84297494891543 9.179679\n",
107 | "10.76077491349346 9.258734\n",
108 | "9.87838865095277 9.289717\n",
109 | "9.832226934730802 9.316842\n",
110 | "9.91451973236359 9.346725\n",
111 | "8.169955868587069 9.287887\n",
112 | "9.4113094651193 9.294058\n",
113 | "12.588747250292984 9.458793\n",
114 | "11.614328073134349 9.566569\n",
115 | "8.756283272181825 9.526054\n",
116 | "9.42527426226902 9.521015\n",
117 | "9.091106720369456 9.499519\n",
118 | "9.87699878174914 9.518393\n",
119 | "11.029047748320501 9.593925\n",
120 | "8.982782430100931 9.563368\n",
121 | "11.097179168410722 9.6400585\n",
122 | "10.779314755086752 9.6970215\n",
123 | "8.95437311962296 9.659889\n",
124 | "9.943352049267096 9.674062\n",
125 | "10.405797655406552 9.710648\n",
126 | "9.718021922726967 9.711016\n",
127 | "7.640616414478513 9.607495\n",
128 | "11.422782305576098 9.698259\n",
129 | "7.699156728334314 9.598305\n",
130 | "9.998392294411147 9.618309\n",
131 | "10.192809197524342 9.647034\n",
132 | "10.55384486604305 9.692373\n",
133 | "9.78157943112437 9.696833\n",
134 | "8.28339764994362 9.626162\n",
135 | "9.492083354376863 9.619458\n",
136 | "11.49240487643782 9.713105\n",
137 | "9.864362979162943 9.720668\n",
138 | "9.102758787066685 9.689773\n",
139 | "8.780976695502435 9.644333\n",
140 | "9.897556242514868 9.656994\n",
141 | "12.029257802894167 9.775607\n",
142 | "10.74416685255439 9.824036\n",
143 | "10.247604271495371 9.845214\n",
144 | "9.22429159423789 9.814168\n",
145 | "10.055761104728834 9.826248\n",
146 | "7.936765916479491 9.731773\n",
147 | "10.910799804852461 9.790725\n",
148 | "10.236953466768806 9.813036\n",
149 | "10.201714850847239 9.83247\n",
150 | "9.596903190304971 9.820691\n",
151 | "10.481184209812689 9.853716\n",
152 | "10.804625640452118 9.901261\n",
153 | "9.335944684955452 9.872995\n",
154 | "9.873953976138115 9.873044\n",
155 | "10.328288752235803 9.895806\n",
156 | "9.512888261771588 9.87666\n",
157 | "11.548291322343744 9.960241\n",
158 | "10.632949977175628 9.993876\n",
159 | "10.094482688678326 9.998907\n"
160 | ]
161 | }
162 | ],
163 | "source": [
164 | "\n",
165 | "with tf.Session() as sess:\n",
166 | " sess.run(init)\n",
167 | " for i in range(len(raw_data)):\n",
168 | " curr_avg = sess.run(update_avg, feed_dict={curr_value: raw_data[i]})\n",
169 | " sess.run(tf.assign(prev_avg, curr_avg))\n",
170 | " print(raw_data[i], curr_avg)"
171 | ]
172 | },
173 | {
174 | "cell_type": "code",
175 | "execution_count": null,
176 | "metadata": {},
177 | "outputs": [],
178 | "source": []
179 | }
180 | ],
181 | "metadata": {
182 | "kernelspec": {
183 | "display_name": "Python 3",
184 | "language": "python",
185 | "name": "python3"
186 | },
187 | "language_info": {
188 | "codemirror_mode": {
189 | "name": "ipython",
190 | "version": 3
191 | },
192 | "file_extension": ".py",
193 | "mimetype": "text/x-python",
194 | "name": "python",
195 | "nbconvert_exporter": "python",
196 | "pygments_lexer": "ipython3",
197 | "version": "3.7.0b1"
198 | }
199 | },
200 | "nbformat": 4,
201 | "nbformat_minor": 2
202 | }
203 |
--------------------------------------------------------------------------------
/ch02/Listing 2.16.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf\n",
10 | "import numpy as np"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "raw_data = np.random.normal(10, 1, 100)\n",
20 | "\n",
21 | "alpha = tf.constant(0.05)\n",
22 | "curr_value = tf.placeholder(tf.float32)\n",
23 | "prev_avg = tf.Variable(0.)\n",
24 | "update_avg = alpha * curr_value + (1 - alpha) * prev_avg"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": 3,
30 | "metadata": {},
31 | "outputs": [],
32 | "source": [
33 | "avg_hist = tf.summary.scalar(\"running_average\", update_avg)\n",
34 | "value_hist = tf.summary.scalar(\"incoming_values\", curr_value)\n",
35 | "merged = tf.summary.merge_all()\n",
36 | "writer = tf.summary.FileWriter(\"../data/logs\")\n",
37 | "init = tf.global_variables_initializer()"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": 4,
43 | "metadata": {},
44 | "outputs": [
45 | {
46 | "name": "stdout",
47 | "output_type": "stream",
48 | "text": [
49 | "8.892086121381602 0.4446043\n",
50 | "11.678656752333826 1.0063069\n",
51 | "10.141918734133784 1.4630876\n",
52 | "11.575532498150974 1.9687097\n",
53 | "9.966530609775125 2.3686008\n",
54 | "8.698087218951711 2.685075\n",
55 | "10.134693512123714 3.057556\n",
56 | "9.329812045206182 3.3711686\n",
57 | "10.612642419810204 3.7332425\n",
58 | "9.158466010568214 4.0045037\n",
59 | "10.373021112726594 4.3229294\n",
60 | "8.393845308176882 4.526475\n",
61 | "8.467154101696174 4.723509\n",
62 | "10.14305574794267 4.994486\n",
63 | "9.9982997281831 5.2446766\n",
64 | "10.19548594742745 5.492217\n",
65 | "10.893782328184157 5.7622952\n",
66 | "10.210657566000727 5.984713\n",
67 | "10.013567952552364 6.186156\n",
68 | "9.0440295073779 6.329049\n",
69 | "10.723887068066562 6.548791\n",
70 | "8.298891639985447 6.636296\n",
71 | "10.622525545300153 6.8356075\n",
72 | "9.798328715137169 6.983743\n",
73 | "11.692681693147204 7.2191896\n",
74 | "11.94352688021393 7.4554067\n",
75 | "9.698282025137127 7.5675507\n",
76 | "10.111240398383893 7.694735\n",
77 | "8.16944211514892 7.71847\n",
78 | "11.005348355268968 7.882814\n",
79 | "8.272991381778489 7.902323\n",
80 | "10.013539215778303 8.007883\n",
81 | "8.814968606251588 8.048237\n",
82 | "10.841030081133354 8.187877\n",
83 | "9.668078624971445 8.261887\n",
84 | "9.30342685596161 8.313963\n",
85 | "9.52128288971733 8.374329\n",
86 | "9.051335191606377 8.408179\n",
87 | "10.071847361613132 8.491363\n",
88 | "8.778371367859595 8.5057125\n",
89 | "9.811430391461917 8.570999\n",
90 | "9.26185795818945 8.605542\n",
91 | "8.531758301630667 8.601853\n",
92 | "9.888478342399864 8.666184\n",
93 | "11.157202461245406 8.790735\n",
94 | "10.721114455234929 8.887254\n",
95 | "10.11455306472292 8.948619\n",
96 | "8.903533076776634 8.946365\n",
97 | "11.648904056968732 9.081492\n",
98 | "9.175676260173818 9.086202\n",
99 | "9.84208526064008 9.123996\n",
100 | "9.876386792357017 9.161615\n",
101 | "10.588756549349178 9.232972\n",
102 | "12.18345602470975 9.380496\n",
103 | "9.943486264740452 9.408646\n",
104 | "9.330458320568363 9.4047365\n",
105 | "9.996283956111842 9.434314\n",
106 | "9.48855051208554 9.437025\n",
107 | "10.472177536386713 9.488783\n",
108 | "10.435337986227402 9.53611\n",
109 | "10.382747947895945 9.578442\n",
110 | "10.480541591283814 9.623547\n",
111 | "9.178174387074247 9.601278\n",
112 | "8.634429870620531 9.552936\n",
113 | "10.484363741954114 9.599507\n",
114 | "10.77661246128205 9.658362\n",
115 | "11.618051944739873 9.756348\n",
116 | "10.32871854044874 9.7849655\n",
117 | "9.481352766582088 9.769785\n",
118 | "9.844736040715519 9.773533\n",
119 | "10.156098635673168 9.792661\n",
120 | "11.602684820873648 9.883162\n",
121 | "8.461753593933638 9.812092\n",
122 | "10.639975735563358 9.853486\n",
123 | "8.826547120557809 9.802138\n",
124 | "10.588628118308435 9.841463\n",
125 | "10.159785839351073 9.857379\n",
126 | "11.153342338937186 9.922176\n",
127 | "9.192619753430668 9.885698\n",
128 | "8.33555389499698 9.808191\n",
129 | "10.260946147782578 9.830829\n",
130 | "10.394246504619742 9.858999\n",
131 | "9.697754464550833 9.850937\n",
132 | "10.367336102496328 9.876757\n",
133 | "10.661413360477209 9.915989\n",
134 | "12.246358115240021 10.032507\n",
135 | "11.736444043729731 10.117704\n",
136 | "9.99775304894855 10.111707\n",
137 | "9.91764375580591 10.102003\n",
138 | "10.551705283291247 10.124488\n",
139 | "9.455920793229037 10.09106\n",
140 | "10.870044607322756 10.130009\n",
141 | "9.99738667201924 10.123378\n",
142 | "8.557689830186472 10.045093\n",
143 | "8.284319446774308 9.957054\n",
144 | "9.944320322658097 9.956417\n",
145 | "10.185297538084226 9.967861\n",
146 | "10.867294670389017 10.012833\n",
147 | "8.948754796987483 9.959629\n",
148 | "10.8632738149558 10.00481\n"
149 | ]
150 | }
151 | ],
152 | "source": [
153 | "with tf.Session() as sess:\n",
154 | " sess.run(init)\n",
155 | " writer.add_graph(sess.graph)\n",
156 | " for i in range(len(raw_data)):\n",
157 | " summary_str, curr_avg = sess.run([merged, update_avg],\n",
158 | " feed_dict={curr_value : raw_data[i]})\n",
159 | " sess.run(tf.assign(prev_avg, curr_avg))\n",
160 | " print(raw_data[i], curr_avg)\n",
161 | " writer.add_summary(summary_str, i)"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": null,
167 | "metadata": {},
168 | "outputs": [],
169 | "source": []
170 | }
171 | ],
172 | "metadata": {
173 | "kernelspec": {
174 | "display_name": "Python 3",
175 | "language": "python",
176 | "name": "python3"
177 | },
178 | "language_info": {
179 | "codemirror_mode": {
180 | "name": "ipython",
181 | "version": 3
182 | },
183 | "file_extension": ".py",
184 | "mimetype": "text/x-python",
185 | "name": "python",
186 | "nbconvert_exporter": "python",
187 | "pygments_lexer": "ipython3",
188 | "version": "3.7.0b1"
189 | }
190 | },
191 | "nbformat": 4,
192 | "nbformat_minor": 2
193 | }
194 |
--------------------------------------------------------------------------------
/ch07/Listing 7.01.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [
17 | {
18 | "name": "stderr",
19 | "output_type": "stream",
20 | "text": [
21 | "WARNING: Logging before flag parsing goes to stderr.\n",
22 | "W0801 15:42:02.709945 4679296448 deprecation.py:323] From :3: string_input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
23 | "Instructions for updating:\n",
24 | "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(string_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.\n",
25 | "W0801 15:42:02.715996 4679296448 deprecation.py:323] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/training/input.py:277: input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
26 | "Instructions for updating:\n",
27 | "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(input_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.\n",
28 | "W0801 15:42:02.718385 4679296448 deprecation.py:323] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/training/input.py:189: limit_epochs (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
29 | "Instructions for updating:\n",
30 | "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)`.\n",
31 | "W0801 15:42:02.721618 4679296448 deprecation.py:323] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/training/input.py:198: QueueRunner.__init__ (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
32 | "Instructions for updating:\n",
33 | "To construct input pipelines, use the `tf.data` module.\n",
34 | "W0801 15:42:02.724742 4679296448 deprecation.py:323] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/training/input.py:198: add_queue_runner (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
35 | "Instructions for updating:\n",
36 | "To construct input pipelines, use the `tf.data` module.\n",
37 | "W0801 15:42:02.730230 4679296448 deprecation.py:323] From :4: WholeFileReader.__init__ (from tensorflow.python.ops.io_ops) is deprecated and will be removed in a future version.\n",
38 | "Instructions for updating:\n",
39 | "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.map(tf.read_file)`.\n"
40 | ]
41 | }
42 | ],
43 | "source": [
44 | "filenames = tf.train.match_filenames_once('../data/audio_dataset/*.wav')\n",
45 | "count_num_files = tf.size(filenames)\n",
46 | "filename_queue = tf.train.string_input_producer(filenames)\n",
47 | "reader = tf.WholeFileReader()\n",
48 | "filename, file_contents = reader.read(filename_queue)"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": 3,
54 | "metadata": {},
55 | "outputs": [
56 | {
57 | "name": "stderr",
58 | "output_type": "stream",
59 | "text": [
60 | "W0801 15:42:12.034403 4679296448 deprecation.py:323] From :6: start_queue_runners (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
61 | "Instructions for updating:\n",
62 | "To construct input pipelines, use the `tf.data` module.\n"
63 | ]
64 | },
65 | {
66 | "name": "stdout",
67 | "output_type": "stream",
68 | "text": [
69 | "b'../data/audio_dataset/scream_3.wav'\n",
70 | "b'../data/audio_dataset/cough_2.wav'\n",
71 | "b'../data/audio_dataset/scream_1.wav'\n",
72 | "b'../data/audio_dataset/cough_1.wav'\n",
73 | "b'../data/audio_dataset/scream_2.wav'\n"
74 | ]
75 | }
76 | ],
77 | "source": [
78 | "with tf.Session() as sess:\n",
79 | " sess.run(tf.local_variables_initializer()) \n",
80 | " num_files = sess.run(count_num_files)\n",
81 | " \n",
82 | " coord = tf.train.Coordinator()\n",
83 | " threads = tf.train.start_queue_runners(coord=coord)\n",
84 | " \n",
85 | " for i in range(num_files):\n",
86 | " audio_file = sess.run(filename)\n",
87 | " print(audio_file)"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "metadata": {},
94 | "outputs": [],
95 | "source": []
96 | }
97 | ],
98 | "metadata": {
99 | "kernelspec": {
100 | "display_name": "Python 3",
101 | "language": "python",
102 | "name": "python3"
103 | },
104 | "language_info": {
105 | "codemirror_mode": {
106 | "name": "ipython",
107 | "version": 3
108 | },
109 | "file_extension": ".py",
110 | "mimetype": "text/x-python",
111 | "name": "python",
112 | "nbconvert_exporter": "python",
113 | "pygments_lexer": "ipython3",
114 | "version": "3.7.0b1"
115 | }
116 | },
117 | "nbformat": 4,
118 | "nbformat_minor": 2
119 | }
120 |
--------------------------------------------------------------------------------
/ch07/Listing 7.02.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from bregman.suite import *"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "def get_chromagram(audio_file):\n",
19 | " F = Chromagram(audio_file, nfft=16384, wfft=8192, nhop=2205)\n",
20 | " return F.X"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 3,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "name": "stdout",
30 | "output_type": "stream",
31 | "text": [
32 | "[[2.75141202e-03 2.36929060e-03 2.13343277e-03 2.40018116e-03\n",
33 | " 3.71176630e-03 4.42645075e-03 3.68658979e-03 2.61372694e-03\n",
34 | " 2.37767323e-03 2.48213828e-03 2.30193751e-03 1.56249217e-03\n",
35 | " 1.87126988e-03 2.84709574e-03 2.76987376e-03 1.76381313e-03\n",
36 | " 1.01863551e-03 6.11578349e-04 3.18814576e-04 3.27155557e-04\n",
37 | " 0.00000000e+00 0.00000000e+00]\n",
38 | " [3.26296588e-03 3.83463069e-03 3.25144962e-03 2.66948922e-03\n",
39 | " 3.21321792e-03 3.12241859e-03 2.36434433e-03 2.14447493e-03\n",
40 | " 1.91561392e-03 1.95447839e-03 2.00165146e-03 1.51089335e-03\n",
41 | " 1.63657225e-03 3.05552071e-03 3.14662850e-03 1.80870904e-03\n",
42 | " 1.08529934e-03 6.70629965e-04 4.00645891e-04 4.60234999e-04\n",
43 | " 0.00000000e+00 0.00000000e+00]\n",
44 | " [2.66173073e-03 3.01051233e-03 3.16460160e-03 3.16874609e-03\n",
45 | " 3.41118299e-03 3.04248508e-03 2.06304964e-03 1.77997858e-03\n",
46 | " 1.80688236e-03 1.84552476e-03 2.03517651e-03 1.72105913e-03\n",
47 | " 1.81066522e-03 2.73184439e-03 2.67112497e-03 1.71899097e-03\n",
48 | " 1.15326209e-03 7.66976257e-04 4.67182171e-04 4.44636216e-04\n",
49 | " 0.00000000e+00 0.00000000e+00]\n",
50 | " [2.15976359e-03 2.26651846e-03 3.03947436e-03 4.20493602e-03\n",
51 | " 4.72716781e-03 3.71622414e-03 2.43796443e-03 1.90860883e-03\n",
52 | " 1.65539954e-03 1.54675894e-03 1.79420644e-03 1.72973602e-03\n",
53 | " 2.82797755e-03 3.78177621e-03 3.01336982e-03 1.74594996e-03\n",
54 | " 1.14151693e-03 7.61323898e-04 3.19320724e-04 2.32448107e-04\n",
55 | " 0.00000000e+00 0.00000000e+00]\n",
56 | " [1.67844895e-03 2.05160731e-03 2.70583483e-03 4.01425742e-03\n",
57 | " 4.55356724e-03 3.67706159e-03 2.54458011e-03 2.13523059e-03\n",
58 | " 1.82957353e-03 1.54495375e-03 1.75274646e-03 1.91854487e-03\n",
59 | " 3.12014292e-03 3.80368763e-03 2.82853107e-03 1.74851646e-03\n",
60 | " 1.15653311e-03 7.07785017e-04 2.10694255e-04 1.17944799e-04\n",
61 | " 0.00000000e+00 0.00000000e+00]\n",
62 | " [1.48229011e-03 1.89188339e-03 2.26557197e-03 3.00525702e-03\n",
63 | " 3.42837552e-03 2.89195496e-03 2.09994005e-03 2.06199721e-03\n",
64 | " 2.01362851e-03 1.98436569e-03 2.51413137e-03 2.12464218e-03\n",
65 | " 2.21438437e-03 2.74979884e-03 2.22311541e-03 1.49082556e-03\n",
66 | " 1.16099335e-03 6.10254360e-04 1.80882098e-04 8.93608062e-05\n",
67 | " 0.00000000e+00 0.00000000e+00]\n",
68 | " [1.43134121e-03 1.52794379e-03 1.77116812e-03 2.11090181e-03\n",
69 | " 2.41132944e-03 2.08656223e-03 1.57082576e-03 1.64927977e-03\n",
70 | " 1.87929000e-03 2.25654645e-03 2.48049489e-03 1.89559778e-03\n",
71 | " 1.57684630e-03 1.97750681e-03 1.95908987e-03 1.68366779e-03\n",
72 | " 1.22329863e-03 5.53307284e-04 1.82663563e-04 1.12193482e-04\n",
73 | " 0.00000000e+00 0.00000000e+00]\n",
74 | " [1.97352226e-03 1.79848895e-03 1.73131712e-03 1.88175001e-03\n",
75 | " 2.46864738e-03 2.41677255e-03 1.88785342e-03 2.29219233e-03\n",
76 | " 2.68518565e-03 2.47135916e-03 2.25242272e-03 1.89359109e-03\n",
77 | " 1.39920501e-03 1.26076775e-03 1.46643411e-03 1.33079648e-03\n",
78 | " 8.59710360e-04 4.41357473e-04 2.06586749e-04 1.94347412e-04\n",
79 | " 0.00000000e+00 0.00000000e+00]\n",
80 | " [2.20976342e-03 2.37033384e-03 2.28733024e-03 2.04635847e-03\n",
81 | " 2.86671914e-03 2.86495829e-03 2.43429984e-03 3.84226907e-03\n",
82 | " 4.12304013e-03 3.11479536e-03 2.37341596e-03 1.88121021e-03\n",
83 | " 1.42452789e-03 1.19824971e-03 1.06608486e-03 8.64486368e-04\n",
84 | " 6.44594760e-04 3.95132127e-04 2.43087500e-04 3.02235054e-04\n",
85 | " 0.00000000e+00 0.00000000e+00]\n",
86 | " [1.87029015e-03 2.49069534e-03 3.32634363e-03 3.79662079e-03\n",
87 | " 3.91441178e-03 3.46810958e-03 2.84728409e-03 4.62519926e-03\n",
88 | " 5.21882837e-03 3.93626135e-03 2.29834307e-03 1.60544333e-03\n",
89 | " 1.39390454e-03 1.42608940e-03 1.20434792e-03 9.40077038e-04\n",
90 | " 7.36561862e-04 4.49335702e-04 2.58784707e-04 3.41163426e-04\n",
91 | " 0.00000000e+00 0.00000000e+00]\n",
92 | " [1.81054180e-03 2.12559236e-03 3.21681473e-03 4.23905650e-03\n",
93 | " 4.33781244e-03 4.22280385e-03 3.76708075e-03 3.92965498e-03\n",
94 | " 3.97223185e-03 3.56806534e-03 2.57418156e-03 1.70383854e-03\n",
95 | " 1.77509750e-03 2.13701787e-03 1.92104977e-03 1.29223505e-03\n",
96 | " 7.15608873e-04 4.26632802e-04 2.06739796e-04 2.19874840e-04\n",
97 | " 0.00000000e+00 0.00000000e+00]\n",
98 | " [2.17214536e-03 1.73258187e-03 2.14077239e-03 2.79241926e-03\n",
99 | " 4.20690559e-03 5.11945684e-03 4.21478123e-03 2.61472219e-03\n",
100 | " 2.64889422e-03 2.97074151e-03 2.52577058e-03 1.51281186e-03\n",
101 | " 2.20864635e-03 3.04617123e-03 2.59512256e-03 1.53460602e-03\n",
102 | " 8.41107339e-04 4.81852722e-04 2.34930287e-04 2.33275462e-04\n",
103 | " 0.00000000e+00 0.00000000e+00]]\n"
104 | ]
105 | }
106 | ],
107 | "source": [
108 | "X = get_chromagram('../data/audio_dataset/cough_1.wav')\n",
109 | "print(X)"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": 4,
115 | "metadata": {},
116 | "outputs": [
117 | {
118 | "name": "stdout",
119 | "output_type": "stream",
120 | "text": [
121 | "(12, 22)\n"
122 | ]
123 | }
124 | ],
125 | "source": [
126 | "num_features, num_samples = X.shape\n",
127 | "print(num_features, num_samples)"
128 | ]
129 | },
130 | {
131 | "cell_type": "code",
132 | "execution_count": null,
133 | "metadata": {},
134 | "outputs": [],
135 | "source": []
136 | }
137 | ],
138 | "metadata": {
139 | "kernelspec": {
140 | "display_name": "Python 2",
141 | "language": "python",
142 | "name": "python2"
143 | },
144 | "language_info": {
145 | "codemirror_mode": {
146 | "name": "ipython",
147 | "version": 2
148 | },
149 | "file_extension": ".py",
150 | "mimetype": "text/x-python",
151 | "name": "python",
152 | "nbconvert_exporter": "python",
153 | "pygments_lexer": "ipython2",
154 | "version": "2.7.14"
155 | }
156 | },
157 | "nbformat": 4,
158 | "nbformat_minor": 2
159 | }
160 |
--------------------------------------------------------------------------------
/ch07/Listing 7.04.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "from bregman.suite import * \n",
11 | "import tensorflow as tf"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "k = 2\n",
21 | "max_iterations = 100"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": 3,
27 | "metadata": {},
28 | "outputs": [
29 | {
30 | "name": "stdout",
31 | "output_type": "stream",
32 | "text": [
33 | "WARNING:tensorflow:From /Users/mattmann/git/buildout.python/python-2.7/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
34 | "Instructions for updating:\n",
35 | "Colocations handled automatically by placer.\n",
36 | "WARNING:tensorflow:From :3: string_input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
37 | "Instructions for updating:\n",
38 | "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(string_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.\n",
39 | "WARNING:tensorflow:From /Users/mattmann/git/buildout.python/python-2.7/lib/python2.7/site-packages/tensorflow/python/training/input.py:278: input_producer (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
40 | "Instructions for updating:\n",
41 | "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(input_tensor).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.\n",
42 | "WARNING:tensorflow:From /Users/mattmann/git/buildout.python/python-2.7/lib/python2.7/site-packages/tensorflow/python/training/input.py:190: limit_epochs (from tensorflow.python.training.input) is deprecated and will be removed in a future version.\n",
43 | "Instructions for updating:\n",
44 | "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)`.\n",
45 | "WARNING:tensorflow:From /Users/mattmann/git/buildout.python/python-2.7/lib/python2.7/site-packages/tensorflow/python/training/input.py:199: __init__ (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
46 | "Instructions for updating:\n",
47 | "To construct input pipelines, use the `tf.data` module.\n",
48 | "WARNING:tensorflow:From /Users/mattmann/git/buildout.python/python-2.7/lib/python2.7/site-packages/tensorflow/python/training/input.py:199: add_queue_runner (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
49 | "Instructions for updating:\n",
50 | "To construct input pipelines, use the `tf.data` module.\n",
51 | "WARNING:tensorflow:From /Users/mattmann/git/buildout.python/python-2.7/lib/python2.7/site-packages/tensorflow/python/training/input.py:202: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
52 | "Instructions for updating:\n",
53 | "Use tf.cast instead.\n",
54 | "WARNING:tensorflow:From :4: __init__ (from tensorflow.python.ops.io_ops) is deprecated and will be removed in a future version.\n",
55 | "Instructions for updating:\n",
56 | "Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.map(tf.read_file)`.\n"
57 | ]
58 | }
59 | ],
60 | "source": [
61 | "filenames = tf.train.match_filenames_once('../data/audio_dataset/*.wav')\n",
62 | "count_num_files = tf.size(filenames)\n",
63 | "filename_queue = tf.train.string_input_producer(filenames)\n",
64 | "reader = tf.WholeFileReader()\n",
65 | "filename, file_contents = reader.read(filename_queue)\n",
66 | "chroma = tf.placeholder(tf.float32)\n",
67 | "max_freqs = tf.argmax(chroma, 0)"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": 4,
73 | "metadata": {},
74 | "outputs": [],
75 | "source": [
76 | "def get_next_chromagram(sess):\n",
77 | " audio_file = sess.run(filename)\n",
78 | " F = Chromagram(audio_file, nfft=16384, wfft=8192, nhop=2205)\n",
79 | " return F.X"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": 5,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "def initial_cluster_centroids(X, k):\n",
89 | " return X[0:k, :]"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 6,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 | "def assign_cluster(X, centroids):\n",
99 | " expanded_vectors = tf.expand_dims(X, 0) # 1, 5, 12\n",
100 | " expanded_centroids = tf.expand_dims(centroids, 1) #2, 1, 12\n",
101 | " distances = tf.reduce_sum(tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2) #2, 5\n",
102 | " mins = tf.argmin(distances, 0)\n",
103 | " return mins"
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": 7,
109 | "metadata": {},
110 | "outputs": [],
111 | "source": [
112 | "def recompute_centroids(X, Y):\n",
113 | " sums = tf.unsorted_segment_sum(X, Y, k)\n",
114 | " counts = tf.unsorted_segment_sum(tf.ones_like(X), Y, k)\n",
115 | " return sums / counts"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": 8,
121 | "metadata": {},
122 | "outputs": [],
123 | "source": [
124 | "def extract_feature_vector(sess, chroma_data):\n",
125 | " num_features, num_samples = np.shape(chroma_data)\n",
126 | " freq_vals = sess.run(max_freqs, feed_dict={chroma: chroma_data})\n",
127 | " hist, bins = np.histogram(freq_vals, bins=range(num_features + 1))\n",
128 | " return hist.astype(float) / num_samples"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": 9,
134 | "metadata": {},
135 | "outputs": [],
136 | "source": [
137 | "def get_dataset(sess):\n",
138 | " num_files = sess.run(count_num_files)\n",
139 | " coord = tf.train.Coordinator()\n",
140 | " threads = tf.train.start_queue_runners(coord=coord)\n",
141 | " xs = []\n",
142 | " for _ in range(num_files):\n",
143 | " chroma_data = get_next_chromagram(sess)\n",
144 | " x = [extract_feature_vector(sess, chroma_data)]\n",
145 | " x = np.matrix(x)\n",
146 | " if len(xs) == 0:\n",
147 | " xs = x\n",
148 | " else:\n",
149 | " xs = np.vstack((xs, x))\n",
150 | " return xs"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": 10,
156 | "metadata": {},
157 | "outputs": [
158 | {
159 | "name": "stdout",
160 | "output_type": "stream",
161 | "text": [
162 | "WARNING:tensorflow:From :4: start_queue_runners (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.\n",
163 | "Instructions for updating:\n",
164 | "To construct input pipelines, use the `tf.data` module.\n",
165 | "[[0.20750733 0.07575758 0.08406647 0.0259042 0.04363636 0.06848485\n",
166 | " 0.16482893 0.10408602 0.05075269 0.06060606 0.08406647 0.03030303]\n",
167 | " [0.03533354 0. 0. 0.02040816 0.60234542 0.25982333\n",
168 | " 0. 0.08208955 0. 0. 0. 0. ]]\n",
169 | "INFO:tensorflow:Error reported to Coordinator: , Enqueue operation was cancelled\n",
170 | "\t [[{{node input_producer/input_producer_EnqueueMany}}]]\n"
171 | ]
172 | }
173 | ],
174 | "source": [
175 | "with tf.Session() as sess:\n",
176 | " sess.run(tf.local_variables_initializer())\n",
177 | " X = get_dataset(sess)\n",
178 | " centroids = initial_cluster_centroids(X, k)\n",
179 | " i, converged = 0, False\n",
180 | " while not converged and i < max_iterations:\n",
181 | " i += 1\n",
182 | " Y = assign_cluster(X, centroids)\n",
183 | " centroids = sess.run(recompute_centroids(X, Y))\n",
184 | " print(centroids)"
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": null,
190 | "metadata": {},
191 | "outputs": [],
192 | "source": []
193 | }
194 | ],
195 | "metadata": {
196 | "kernelspec": {
197 | "display_name": "Python 2",
198 | "language": "python",
199 | "name": "python2"
200 | },
201 | "language_info": {
202 | "codemirror_mode": {
203 | "name": "ipython",
204 | "version": 2
205 | },
206 | "file_extension": ".py",
207 | "mimetype": "text/x-python",
208 | "name": "python",
209 | "nbconvert_exporter": "python",
210 | "pygments_lexer": "ipython2",
211 | "version": "2.7.14"
212 | }
213 | },
214 | "nbformat": 4,
215 | "nbformat_minor": 2
216 | }
217 |
--------------------------------------------------------------------------------
/ch07/Listing 7.05 - 7.06.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf\n",
10 | "import numpy as np\n",
11 | "from bregman.suite import *"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "k = 2\n",
21 | "segment_size = 50\n",
22 | "max_iterations = 100"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 3,
28 | "metadata": {},
29 | "outputs": [],
30 | "source": [
31 | "chroma = tf.placeholder(tf.float32)\n",
32 | "max_freqs = tf.argmax(chroma, 0)"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": 4,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "def get_chromagram(audio_file):\n",
42 | " F = Chromagram(audio_file, nfft=16384, wfft=8192, nhop=2205)\n",
43 | " return F.X"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": 5,
49 | "metadata": {},
50 | "outputs": [],
51 | "source": [
52 | "def get_dataset(sess, audio_file):\n",
53 | " chroma_data = get_chromagram(audio_file)\n",
54 | " print('chroma_data', np.shape(chroma_data))\n",
55 | " chroma_length = np.shape(chroma_data)[1]\n",
56 | " print('chroma_length', chroma_length)\n",
57 | " xs = []\n",
58 | " for i in range(chroma_length / segment_size):\n",
59 | " chroma_segment = chroma_data[:, i*segment_size:(i+1)*segment_size]\n",
60 | " x = extract_feature_vector(sess, chroma_segment)\n",
61 | " if len(xs) == 0:\n",
62 | " xs = x\n",
63 | " else:\n",
64 | " xs = np.vstack((xs, x))\n",
65 | " return xs"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 6,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "def extract_feature_vector(sess, chroma_data):\n",
75 | " num_features, num_samples = np.shape(chroma_data)\n",
76 | " freq_vals = sess.run(max_freqs, feed_dict={chroma: chroma_data})\n",
77 | " hist, bins = np.histogram(freq_vals, bins=range(num_features + 1))\n",
78 | " return hist.astype(float) / num_samples"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 7,
84 | "metadata": {},
85 | "outputs": [],
86 | "source": [
87 | "def initial_cluster_centroids(X, k):\n",
88 | " return X[0:k, :]"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 8,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "def recompute_centroids(X, Y):\n",
98 | " sums = tf.unsorted_segment_sum(X, Y, k)\n",
99 | " counts = tf.unsorted_segment_sum(tf.ones_like(X), Y, k)\n",
100 | " return sums / counts"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 9,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "def assign_cluster(X, centroids):\n",
110 | " expanded_vectors = tf.expand_dims(X, 0)\n",
111 | " expanded_centroids = tf.expand_dims(centroids, 1)\n",
112 | " distances = tf.reduce_sum(tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2)\n",
113 | " mins = tf.argmin(distances, 0)\n",
114 | " return mins"
115 | ]
116 | },
117 | {
118 | "cell_type": "code",
119 | "execution_count": 10,
120 | "metadata": {},
121 | "outputs": [
122 | {
123 | "name": "stdout",
124 | "output_type": "stream",
125 | "text": [
126 | "('chroma_data', (12, 633))\n",
127 | "('chroma_length', 633)\n",
128 | "(12, 12)\n",
129 | "('iteration', 50)\n",
130 | "('iteration', 100)\n",
131 | "('0.0m 0.0s', 0)\n",
132 | "('0.0m 5.0s', 1)\n",
133 | "('0.0m 10.0s', 0)\n",
134 | "('0.0m 15.0s', 1)\n",
135 | "('0.0m 20.0s', 1)\n",
136 | "('0.0m 25.0s', 1)\n",
137 | "('0.0m 30.0s', 1)\n",
138 | "('0.0m 35.0s', 0)\n",
139 | "('0.0m 40.0s', 1)\n",
140 | "('0.0m 45.0s', 1)\n",
141 | "('0.0m 50.0s', 0)\n",
142 | "('0.0m 55.0s', 0)\n"
143 | ]
144 | }
145 | ],
146 | "source": [
147 | "with tf.Session() as sess:\n",
148 | " X = get_dataset(sess, '../data/TalkingMachinesPodcast.wav')\n",
149 | " print(np.shape(X))\n",
150 | " centroids = initial_cluster_centroids(X, k)\n",
151 | " i, converged = 0, False\n",
152 | " while not converged and i < max_iterations:\n",
153 | " i += 1\n",
154 | " Y = assign_cluster(X, centroids)\n",
155 | " centroids = sess.run(recompute_centroids(X, Y))\n",
156 | " if i % 50 == 0:\n",
157 | " print('iteration', i)\n",
158 | " \n",
159 | " segments = sess.run(Y)\n",
160 | " for i in range(len(segments)):\n",
161 | " seconds = (i * segment_size) / float(10)\n",
162 | " min, sec = divmod(seconds, 60)\n",
163 | " time_str = '{}m {}s'.format(min, sec)\n",
164 | " print(time_str, segments[i])"
165 | ]
166 | },
167 | {
168 | "cell_type": "code",
169 | "execution_count": null,
170 | "metadata": {},
171 | "outputs": [],
172 | "source": []
173 | }
174 | ],
175 | "metadata": {
176 | "kernelspec": {
177 | "display_name": "Python 2",
178 | "language": "python",
179 | "name": "python2"
180 | },
181 | "language_info": {
182 | "codemirror_mode": {
183 | "name": "ipython",
184 | "version": 2
185 | },
186 | "file_extension": ".py",
187 | "mimetype": "text/x-python",
188 | "name": "python",
189 | "nbconvert_exporter": "python",
190 | "pygments_lexer": "ipython2",
191 | "version": "2.7.14"
192 | }
193 | },
194 | "nbformat": 4,
195 | "nbformat_minor": 2
196 | }
197 |
--------------------------------------------------------------------------------
/ch09/Listing 9.07 - 9.11.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import tensorflow as tf "
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "class HMM(object):\n",
20 | " def __init__(self, initial_prob, trans_prob, obs_prob):\n",
21 | " self.N = np.size(initial_prob)\n",
22 | " self.initial_prob = initial_prob\n",
23 | " self.trans_prob = trans_prob\n",
24 | " self.emission = tf.constant(obs_prob)\n",
25 | " assert self.initial_prob.shape == (self.N, 1)\n",
26 | " assert self.trans_prob.shape == (self.N, self.N)\n",
27 | " assert obs_prob.shape[0] == self.N\n",
28 | " self.obs_idx = tf.placeholder(tf.int32)\n",
29 | " self.fwd = tf.placeholder(tf.float64)\n",
30 | " self.viterbi = tf.placeholder(tf.float64)\n",
31 | " \n",
32 | " def get_emission(self, obs_idx):\n",
33 | " slice_location = [0, obs_idx]\n",
34 | " num_rows = tf.shape(self.emission)[0]\n",
35 | " slice_shape = [num_rows, 1]\n",
36 | " return tf.slice(self.emission, slice_location, slice_shape)\n",
37 | " \n",
38 | " def forward_init_op(self):\n",
39 | " obs_prob = self.get_emission(self.obs_idx)\n",
40 | " fwd = tf.multiply(self.initial_prob, obs_prob)\n",
41 | " return fwd\n",
42 | " \n",
43 | " def forward_op(self):\n",
44 | " transitions = tf.matmul(self.fwd,\n",
45 | " tf.transpose(self.get_emission(self.obs_idx)))\n",
46 | " weighted_transitions = transitions * self.trans_prob\n",
47 | " fwd = tf.reduce_sum(weighted_transitions, 0)\n",
48 | "        return tf.reshape(fwd, tf.shape(self.fwd))\n",
49 | " def decode_op(self):\n",
50 | " transitions = tf.matmul(self.viterbi,\n",
51 | " tf.transpose(self.get_emission(self.obs_idx)))\n",
52 | " weighted_transitions = transitions * self.trans_prob\n",
53 | " viterbi = tf.reduce_max(weighted_transitions, 0)\n",
54 | " return tf.reshape(viterbi, tf.shape(self.viterbi))\n",
55 | "\n",
56 | " def backpt_op(self):\n",
57 | " back_transitions = tf.matmul(self.viterbi, np.ones((1, self.N)))\n",
58 | " weighted_back_transitions = back_transitions * self.trans_prob\n",
59 | " return tf.argmax(weighted_back_transitions, 0) "
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": 3,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "def forward_algorithm(sess, hmm, observations):\n",
69 | " fwd = sess.run(hmm.forward_init_op(), feed_dict={hmm.obs_idx:observations[0]})\n",
70 | " for t in range(1, len(observations)):\n",
71 | " fwd = sess.run(hmm.forward_op(), feed_dict={hmm.obs_idx:observations[t], hmm.fwd: fwd})\n",
72 | " prob = sess.run(tf.reduce_sum(fwd))\n",
73 | " return prob"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": 4,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "def viterbi_decode(sess, hmm, observations):\n",
83 | " viterbi = sess.run(hmm.forward_init_op(), feed_dict={hmm.obs_idx:\n",
84 | " observations[0]})\n",
85 | " backpts = np.ones((hmm.N, len(observations)), 'int32') * -1\n",
86 | " for t in range(1, len(observations)):\n",
87 | " viterbi, backpt = sess.run([hmm.decode_op(), hmm.backpt_op()],\n",
88 | " feed_dict={hmm.obs_idx: observations[t],\n",
89 | " hmm.viterbi: viterbi})\n",
90 | " backpts[:, t] = backpt\n",
91 | " tokens = [viterbi[:, -1].argmax()]\n",
92 | " for i in range(len(observations) - 1, 0, -1):\n",
93 | " tokens.append(backpts[tokens[-1], i])\n",
94 | " return tokens[::-1]\n"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": 5,
100 | "metadata": {},
101 | "outputs": [
102 | {
103 | "name": "stdout",
104 | "output_type": "stream",
105 | "text": [
106 | "Most likely hidden states are [1, 0, 0, 0, 0]\n"
107 | ]
108 | }
109 | ],
110 | "source": [
111 | "initial_prob = np.array([[0.6],[0.4]])\n",
112 | "trans_prob = np.array([[0.7, 0.3],\n",
113 | " [0.4, 0.6]])\n",
114 | "obs_prob = np.array([[0.1, 0.4, 0.5],\n",
115 | " [0.6, 0.3, 0.1]])\n",
116 | "hmm = HMM(initial_prob=initial_prob, trans_prob=trans_prob, obs_prob=obs_prob)\n",
117 | "observations = [0, 1, 1, 2, 1]\n",
118 | "with tf.Session() as sess:\n",
119 | " seq = viterbi_decode(sess, hmm, observations)\n",
120 | " print('Most likely hidden states are {}'.format(seq))\n"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {},
127 | "outputs": [],
128 | "source": []
129 | }
130 | ],
131 | "metadata": {
132 | "kernelspec": {
133 | "display_name": "Python 3",
134 | "language": "python",
135 | "name": "python3"
136 | },
137 | "language_info": {
138 | "codemirror_mode": {
139 | "name": "ipython",
140 | "version": 3
141 | },
142 | "file_extension": ".py",
143 | "mimetype": "text/x-python",
144 | "name": "python",
145 | "nbconvert_exporter": "python",
146 | "pygments_lexer": "ipython3",
147 | "version": "3.7.0b1"
148 | }
149 | },
150 | "nbformat": 4,
151 | "nbformat_minor": 2
152 | }
153 |
--------------------------------------------------------------------------------
/ch11/Listing 11.01 - 11.08.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf\n",
10 | "import numpy as np\n",
11 | "from sklearn import datasets"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "class Autoencoder:\n",
21 | " def __init__(self, input_dim, hidden_dim, epoch=250, learning_rate=0.001):\n",
22 | " self.epoch = epoch\n",
23 | " self.learning_rate = learning_rate\n",
24 | " x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim])\n",
25 | " with tf.name_scope('encode'):\n",
26 | " weights = tf.Variable(tf.random_normal([input_dim, hidden_dim], dtype=tf.float32), name='weights')\n",
27 | " biases = tf.Variable(tf.zeros([hidden_dim]), name='biases')\n",
28 | " encoded = tf.nn.tanh(tf.matmul(x, weights) + biases)\n",
29 | " with tf.name_scope('decode'):\n",
30 | " weights = tf.Variable(tf.random_normal([hidden_dim, input_dim], dtype=tf.float32), name='weights')\n",
31 | " biases = tf.Variable(tf.zeros([input_dim]), name='biases')\n",
32 | " decoded = tf.matmul(encoded, weights) + biases\n",
33 | " \n",
34 | " self.x = x\n",
35 | " self.encoded = encoded\n",
36 | " self.decoded = decoded\n",
37 | " self.loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.x, self.decoded))))\n",
38 | " self.train_op = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)\n",
39 | " self.saver = tf.train.Saver()\n",
40 | " \n",
41 | " def train(self, data):\n",
42 | " num_samples = len(data)\n",
43 | " \n",
44 | " with tf.Session() as sess:\n",
45 | " loss_summary = tf.summary.scalar(\"loss\", self.loss)\n",
46 | " merged = tf.summary.merge_all()\n",
47 | " writer = tf.summary.FileWriter(\"../data/logs\")\n",
48 | " writer.add_graph(sess.graph)\n",
49 | " sess.run(tf.global_variables_initializer())\n",
50 | " for i in range(self.epoch):\n",
51 | " for j in range(num_samples):\n",
52 | " summary_str, l, _ = sess.run([merged, self.loss, self.train_op], feed_dict={self.x: [data[j]]})\n",
53 | " writer.add_summary(summary_str, i) \n",
54 | " if i % 10 == 0:\n",
55 | " print('epoch {0}: loss = {1}'.format(i, l))\n",
56 | " self.saver.save(sess, '../models/model.ckpt') \n",
57 | " self.saver.save(sess, '../models/model.ckpt')\n",
58 | " \n",
59 | " def test(self, data): \n",
60 | " with tf.Session() as sess:\n",
61 | " self.saver.restore(sess, '../models/model.ckpt')\n",
62 | " hidden, reconstructed = sess.run([self.encoded, self.decoded], feed_dict={self.x: data})\n",
63 | " print('input', data)\n",
64 | " print('compressed', hidden)\n",
65 | " print('reconstructed', reconstructed)\n",
66 | " return reconstructed"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": 3,
72 | "metadata": {},
73 | "outputs": [
74 | {
75 | "name": "stderr",
76 | "output_type": "stream",
77 | "text": [
78 | "WARNING: Logging before flag parsing goes to stderr.\n",
79 | "W0801 16:31:22.297580 4741227968 deprecation.py:506] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/training/rmsprop.py:119: calling Ones.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
80 | "Instructions for updating:\n",
81 | "Call initializer instance with the dtype argument instead of passing it to the constructor\n"
82 | ]
83 | },
84 | {
85 | "name": "stdout",
86 | "output_type": "stream",
87 | "text": [
88 | "epoch 0: loss = 4.653130054473877\n",
89 | "epoch 10: loss = 1.6325196027755737\n",
90 | "epoch 20: loss = 0.7741631269454956\n",
91 | "epoch 30: loss = 0.47073864936828613\n",
92 | "epoch 40: loss = 0.41037118434906006\n",
93 | "epoch 50: loss = 0.3657050132751465\n",
94 | "epoch 60: loss = 0.33773884177207947\n",
95 | "epoch 70: loss = 0.35430505871772766\n",
96 | "epoch 80: loss = 0.3498811423778534\n",
97 | "epoch 90: loss = 0.3443930745124817\n",
98 | "epoch 100: loss = 0.3376612663269043\n",
99 | "epoch 110: loss = 0.3295210301876068\n",
100 | "epoch 120: loss = 0.32012617588043213\n",
101 | "epoch 130: loss = 0.30963343381881714\n",
102 | "epoch 140: loss = 0.2982889711856842\n",
103 | "epoch 150: loss = 0.28652894496917725\n",
104 | "epoch 160: loss = 0.27479392290115356\n",
105 | "epoch 170: loss = 0.2623614966869354\n",
106 | "epoch 180: loss = 0.2507155239582062\n",
107 | "epoch 190: loss = 0.24356970191001892\n",
108 | "epoch 200: loss = 0.23660524189472198\n",
109 | "epoch 210: loss = 0.23186270892620087\n",
110 | "epoch 220: loss = 0.2285982072353363\n",
111 | "epoch 230: loss = 0.22585973143577576\n",
112 | "epoch 240: loss = 0.22320273518562317\n",
113 | "input [[8, 4, 6, 2]]\n",
114 | "compressed [[-0.89236486]]\n",
115 | "reconstructed [[6.5121665 2.8261158 5.5185246 1.9634247]]\n"
116 | ]
117 | },
118 | {
119 | "data": {
120 | "text/plain": [
121 | "array([[6.5121665, 2.8261158, 5.5185246, 1.9634247]], dtype=float32)"
122 | ]
123 | },
124 | "execution_count": 3,
125 | "metadata": {},
126 | "output_type": "execute_result"
127 | }
128 | ],
129 | "source": [
130 | "hidden_dim = 1\n",
131 | "data = datasets.load_iris().data\n",
132 | "input_dim = len(data[0])\n",
133 | "ae = Autoencoder(input_dim, hidden_dim)\n",
134 | "ae.train(data)\n",
135 | "ae.test([[8, 4, 6, 2]])"
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": null,
141 | "metadata": {},
142 | "outputs": [],
143 | "source": []
144 | }
145 | ],
146 | "metadata": {
147 | "kernelspec": {
148 | "display_name": "Python 3",
149 | "language": "python",
150 | "name": "python3"
151 | },
152 | "language_info": {
153 | "codemirror_mode": {
154 | "name": "ipython",
155 | "version": 3
156 | },
157 | "file_extension": ".py",
158 | "mimetype": "text/x-python",
159 | "name": "python",
160 | "nbconvert_exporter": "python",
161 | "pygments_lexer": "ipython3",
162 | "version": "3.7.0b1"
163 | }
164 | },
165 | "nbformat": 4,
166 | "nbformat_minor": 2
167 | }
168 |
--------------------------------------------------------------------------------
/ch11/Listing 11.07-11.08.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf\n",
10 | "import numpy as np\n",
11 | "from sklearn import datasets"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "def get_batch(X, size):\n",
21 | " a = np.random.choice(len(X), size, replace=False)\n",
22 | " return X[a] "
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 3,
28 | "metadata": {},
29 | "outputs": [],
30 | "source": [
31 | "class Autoencoder:\n",
32 | " def __init__(self, input_dim, hidden_dim, epoch=250, learning_rate=0.001):\n",
33 | " self.epoch = epoch\n",
34 | " self.learning_rate = learning_rate\n",
35 | " x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim])\n",
36 | " with tf.name_scope('encode'):\n",
37 | " weights = tf.Variable(tf.random_normal([input_dim, hidden_dim], dtype=tf.float32), name='weights')\n",
38 | " biases = tf.Variable(tf.zeros([hidden_dim]), name='biases')\n",
39 | " encoded = tf.nn.tanh(tf.matmul(x, weights) + biases)\n",
40 | " with tf.name_scope('decode'):\n",
41 | " weights = tf.Variable(tf.random_normal([hidden_dim, input_dim], dtype=tf.float32), name='weights')\n",
42 | " biases = tf.Variable(tf.zeros([input_dim]), name='biases')\n",
43 | " decoded = tf.matmul(encoded, weights) + biases\n",
44 | " \n",
45 | " self.x = x\n",
46 | " self.encoded = encoded\n",
47 | " self.decoded = decoded\n",
48 | " self.loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.x, self.decoded))))\n",
49 | " self.train_op = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)\n",
50 | " self.saver = tf.train.Saver()\n",
51 | " \n",
52 | " def train(self, data):\n",
53 | " num_samples = len(data)\n",
54 | " with tf.Session() as sess:\n",
55 | " sess.run(tf.global_variables_initializer())\n",
56 | " for i in range(self.epoch):\n",
57 | " for j in range(num_samples):\n",
58 | " l, _ = sess.run([self.loss, self.train_op], feed_dict={self.x: [data[j]]})\n",
59 | " if i % 10 == 0:\n",
60 | " print('epoch {0}: loss = {1}'.format(i, l))\n",
61 | " self.saver.save(sess, '../models/model.ckpt')\n",
62 | " self.saver.save(sess, '../models/model.ckpt')\n",
63 | " \n",
64 | " def test(self, data): \n",
65 | " with tf.Session() as sess:\n",
66 | " self.saver.restore(sess, '../models/model.ckpt')\n",
67 | " hidden, reconstructed = sess.run([self.encoded, self.decoded], feed_dict={self.x: data})\n",
68 | " print('input', data)\n",
69 | " print('compressed', hidden)\n",
70 | " print('reconstructed', reconstructed)\n",
71 | " return reconstructed\n",
72 | "\n",
73 | " def train(self, data, batch_size=10):\n",
74 | " with tf.Session() as sess:\n",
75 | " sess.run(tf.global_variables_initializer())\n",
76 | " for i in range(self.epoch):\n",
77 | " for j in range(500):\n",
78 | " batch_data = get_batch(data, batch_size)\n",
79 | " l, _ = sess.run([self.loss, self.train_op], feed_dict={self.x: batch_data})\n",
80 | " if i % 10 == 0:\n",
81 | " print('epoch {0}: loss = {1}'.format(i, l))\n",
82 | " self.saver.save(sess, '../models/model.ckpt')\n",
83 | " self.saver.save(sess, '../models/model.ckpt')"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": 4,
89 | "metadata": {},
90 | "outputs": [
91 | {
92 | "name": "stderr",
93 | "output_type": "stream",
94 | "text": [
95 | "WARNING: Logging before flag parsing goes to stderr.\n",
96 | "W0801 16:32:23.199314 4610823616 deprecation.py:506] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/training/rmsprop.py:119: calling Ones.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
97 | "Instructions for updating:\n",
98 | "Call initializer instance with the dtype argument instead of passing it to the constructor\n"
99 | ]
100 | },
101 | {
102 | "name": "stdout",
103 | "output_type": "stream",
104 | "text": [
105 | "epoch 0: loss = 3.287008047103882\n",
106 | "epoch 10: loss = 0.5802170038223267\n",
107 | "epoch 20: loss = 0.30212658643722534\n",
108 | "epoch 30: loss = 0.2772676348686218\n",
109 | "epoch 40: loss = 0.34443092346191406\n",
110 | "epoch 50: loss = 0.2815895080566406\n",
111 | "epoch 60: loss = 0.31978023052215576\n",
112 | "epoch 70: loss = 0.1632925420999527\n",
113 | "epoch 80: loss = 0.25616973638534546\n",
114 | "epoch 90: loss = 0.30626076459884644\n",
115 | "epoch 100: loss = 0.28544068336486816\n",
116 | "epoch 110: loss = 0.28550511598587036\n",
117 | "epoch 120: loss = 0.44335800409317017\n",
118 | "epoch 130: loss = 0.30442678928375244\n",
119 | "epoch 140: loss = 0.31502965092658997\n",
120 | "epoch 150: loss = 0.4268891215324402\n",
121 | "epoch 160: loss = 0.325786828994751\n",
122 | "epoch 170: loss = 0.2665727734565735\n",
123 | "epoch 180: loss = 0.2218041568994522\n",
124 | "epoch 190: loss = 0.3120783567428589\n",
125 | "epoch 200: loss = 0.25270798802375793\n",
126 | "epoch 210: loss = 0.268833190202713\n",
127 | "epoch 220: loss = 0.33403274416923523\n",
128 | "epoch 230: loss = 0.2214595377445221\n",
129 | "epoch 240: loss = 0.2950868010520935\n",
130 | "input [[8, 4, 6, 2]]\n",
131 | "compressed [[0.4036218]]\n",
132 | "reconstructed [[6.877247 2.816194 6.2037363 2.2288456]]\n"
133 | ]
134 | },
135 | {
136 | "data": {
137 | "text/plain": [
138 | "array([[6.877247 , 2.816194 , 6.2037363, 2.2288456]], dtype=float32)"
139 | ]
140 | },
141 | "execution_count": 4,
142 | "metadata": {},
143 | "output_type": "execute_result"
144 | }
145 | ],
146 | "source": [
147 | "hidden_dim = 1\n",
148 | "data = datasets.load_iris().data\n",
149 | "input_dim = len(data[0])\n",
150 | "ae = Autoencoder(input_dim, hidden_dim)\n",
151 | "ae.train(data)\n",
152 | "ae.test([[8, 4, 6, 2]])"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "metadata": {},
159 | "outputs": [],
160 | "source": []
161 | }
162 | ],
163 | "metadata": {
164 | "kernelspec": {
165 | "display_name": "Python 3",
166 | "language": "python",
167 | "name": "python3"
168 | },
169 | "language_info": {
170 | "codemirror_mode": {
171 | "name": "ipython",
172 | "version": 3
173 | },
174 | "file_extension": ".py",
175 | "mimetype": "text/x-python",
176 | "name": "python",
177 | "nbconvert_exporter": "python",
178 | "pygments_lexer": "ipython3",
179 | "version": "3.7.0b1"
180 | }
181 | },
182 | "nbformat": 4,
183 | "nbformat_minor": 2
184 | }
185 |
--------------------------------------------------------------------------------
/ch15/vgg_create_imgs.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | from multiprocessing import Pool
4 | from skimage import color
5 | from skimage.io import imread
6 | import pandas as pd
7 | import os
8 | from tqdm import tqdm_notebook as tqdm
9 | from tqdm.auto import tqdm as tqdm_nn
10 | import random
11 | import requests
12 | from urllib.error import HTTPError, URLError
13 | from requests.exceptions import ConnectionError
14 | from requests.exceptions import ReadTimeout
15 | from http.client import IncompleteRead
16 | import socket
17 | from socket import timeout
18 |
19 | socket.setdefaulttimeout(30)
20 |
def fx(df):
    # Worker body: download and crop every face listed in this dataframe
    # partition. tqdm wraps the iterator purely for progress reporting.
    for _, record in tqdm(df.iterrows()):
        bbox = record[['LEFT', 'TOP', 'RIGHT', 'BOTTOM']].values
        clip_image_and_save(record['URL'], bbox, record['CELEB'], record['ID'])
28 |
29 |
def parallelize_dataframe(df, func, n_cores=48):
    """Split ``df`` into ``n_cores`` roughly equal chunks and run ``func``
    on each chunk in a separate worker process.

    ``func`` is executed purely for its side effects (images written to
    disk by ``fx``); nothing is returned.

    :param df: pandas DataFrame (anything np.array_split accepts).
    :param func: picklable callable taking one DataFrame chunk.
    :param n_cores: number of worker processes / chunks.
    """
    df_split = np.array_split(df, n_cores)
    pool = Pool(n_cores)
    try:
        pool.map(func, df_split)
    finally:
        # Always release the worker processes, even if a chunk raises;
        # the original leaked the pool on any exception from map().
        pool.close()
        pool.join()
36 |
def clip_image_and_save(url, bbox, celeb, number):
    """Download the image at ``url``, crop it to ``bbox`` and save a
    224x224 PNG under ``vgg-face/<celeb>/``.

    :param url: image URL to fetch with skimage's imread.
    :param bbox: (LEFT, TOP, RIGHT, BOTTOM) pixel coordinates.
    :param celeb: celebrity name, used as the output subdirectory.
    :param number: row ID, used in the output file name.

    Download/crop failures are printed and skipped so one bad record
    cannot kill the whole crawl.
    """
    directory = 'vgg-face/' + celeb
    # Fix: the old name said 244x244 but the image is resized to 224x224
    # (the VGG-Face input size); the file name now matches reality.
    img_name = directory + '/' + str(number) + '_224x224.png'

    if os.path.exists(img_name):
        print('Image ' + img_name + ' already exists, skipping.')
        return

    try:
        image = imread(url)
    except (SyntaxError, IndexError, AttributeError, HTTPError,
            ConnectionResetError, ConnectionRefusedError, URLError,
            ValueError, IncompleteRead, TimeoutError, timeout) as e:
        print('Error writing url: ' + url + ' skipping. Message: ' + str(e))
        return

    x1 = int(bbox[0])
    y1 = int(bbox[1])
    x2 = int(bbox[2])
    y2 = int(bbox[3])

    if not os.path.exists(directory):
        print('Making ' + directory)
        os.makedirs(directory)

    # Rows are y (TOP..BOTTOM), columns are x (LEFT..RIGHT); this is the
    # same span as the original's y1:y1+h, x1:x1+w with h=y2-y1, w=x2-x1.
    crop_img = image[y1:y2, x1:x2]
    new_size = 224, 224
    try:
        # cv2.resize expects (width, height); raises cv2.error on
        # degenerate (empty) crops, which we skip.
        crop_img = cv2.resize(crop_img, new_size, interpolation=cv2.INTER_CUBIC)
    except cv2.error as e:
        print('Error cropping with CV2 for image: ' + url + '. skipping. Message: ' + str(e))
        return

    print('Writing ' + img_name)
    cv2.imwrite(img_name, crop_img)
75 |
# ---- driver script ----
# Install tqdm's pandas integration (enables Series.progress_apply in workers).
tqdm_nn.pandas()
valid_face_urls_path = 'vgg_face_full_urls.csv'
print('Reading Dataframe.')
df = pd.read_csv(valid_face_urls_path)
# Keep only rows whose URL was previously verified by vgg_valid_url.py.
df = df[df.VALID_URL==True]
print('Reducing Dataframe to valid URLs and resetting index.')
df.reset_index(inplace=True)

# Download/crop all faces using 12 worker processes.
parallelize_dataframe(df, fx, 12)
85 |
86 |
87 |
--------------------------------------------------------------------------------
/ch15/vgg_valid_url.py:
--------------------------------------------------------------------------------
1 | from tqdm.auto import tqdm as tqdm_nn
2 | import pandas as pd
3 | import numpy as np
4 | from multiprocessing import Pool
5 | import requests
6 | from requests.exceptions import ConnectionError
7 | from requests.exceptions import ReadTimeout
8 |
print('Reading Dataframe.')
# Full VGG-Face index: one row per (CELEB, ID, URL, bounding box).
df = pd.read_csv('vgg_face_full.csv')

# Install tqdm's pandas integration (enables Series.progress_apply below).
tqdm_nn.pandas()
# Per-request timeout budget, in seconds, read by url_ok().
timeout=1
14 |
def url_ok(url):
    """Return True iff a HEAD request to ``url`` answers 200 OK.

    Any requests-level failure — connection refused, DNS error, timeout,
    malformed URL, too many redirects, ... — counts as an invalid URL.
    The original caught only ConnectionError/ReadTimeout, so any other
    RequestException (e.g. MissingSchema, ConnectTimeout) would crash the
    whole worker process; RequestException is their common base class.
    """
    try:
        # `timeout` is the module-level 1-second budget defined above.
        r = requests.head(url, timeout=timeout)
        return r.status_code == 200
    except requests.exceptions.RequestException:
        #print("URL connection error", url)
        return False
22 |
def fx(df):
    # Worker body: mark every URL in this dataframe chunk as valid/invalid.
    # progress_apply is tqdm's instrumented Series.apply (see tqdm_nn.pandas()).
    df['VALID_URL'] = df['URL'].progress_apply(url_ok)
    return df
26 |
def parallelize_dataframe(df, func, n_cores=8):
    """Apply ``func`` to ``n_cores`` roughly equal chunks of ``df`` in
    parallel and return the concatenated result.

    :param df: pandas DataFrame to split with np.array_split.
    :param func: picklable callable mapping a DataFrame chunk to a DataFrame.
    :param n_cores: number of worker processes / chunks.
    :return: the chunks' results concatenated in original chunk order.
    """
    df_split = np.array_split(df, n_cores)
    pool = Pool(n_cores)
    try:
        df = pd.concat(pool.map(func, df_split))
    finally:
        # Always release the worker processes, even if a chunk raises;
        # the original leaked the pool on any exception from map().
        pool.close()
        pool.join()
    return df
34 |
# Validate all URLs in parallel (adds the VALID_URL column), then persist
# the augmented dataframe for vgg_create_imgs.py to consume.
df = parallelize_dataframe(df, fx)
print('Writing Dataframe.')
df.to_csv('vgg_face_full_urls.csv')
38 |
--------------------------------------------------------------------------------
/ch16/Listing 16.01 - 16.06.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import tensorflow as tf\n",
11 | "from tensorflow.contrib import rnn"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 2,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "class SeriesPredictor:\n",
21 | " def __init__(self, input_dim, seq_size, hidden_dim=10):\n",
22 | " self.input_dim = input_dim\n",
23 | " self.seq_size = seq_size\n",
24 | " self.hidden_dim = hidden_dim\n",
25 | " \n",
26 | " self.W_out = tf.Variable(tf.random_normal([hidden_dim, 1]), name='W_out')\n",
27 | " self.b_out = tf.Variable(tf.random_normal([1]), name='b_out')\n",
28 | " self.x = tf.placeholder(tf.float32, [None, seq_size, input_dim])\n",
29 | " self.y = tf.placeholder(tf.float32, [None, seq_size])\n",
30 | " self.cost = tf.reduce_mean(tf.square(self.model() - self.y)) \n",
31 | " self.train_op = tf.train.AdamOptimizer().minimize(self.cost)\n",
32 | " self.saver = tf.train.Saver()\n",
33 | " \n",
34 | " def model(self): \n",
35 | " \"\"\"\n",
36 | " :param x: inputs of size [T, batch_size, input_size] \n",
37 | " :param W: matrix of fully-connected output layer weights \n",
38 | " :param b: vector of fully-connected output layer biases \n",
39 | " \"\"\"\n",
40 | "\n",
41 | " cell = rnn.BasicLSTMCell(self.hidden_dim)\n",
42 | " outputs, states = tf.nn.dynamic_rnn(cell, self.x, dtype=tf.float32)\n",
43 | " num_examples = tf.shape(self.x)[0]\n",
44 | " print('Shape '+str(np.shape(self.x))) # this turns out to be [3, 4, 1] - 3 samples of size 4 in a matrix\n",
45 | " print('Num Ex: '+str(np.shape(self.x)[0])) # this is 3 for self.x\n",
46 | " W_repeated = tf.tile(tf.expand_dims(self.W_out, 0), [num_examples, 1, 1]) # creates repeated array of [10, 1, 1] * [3, 1, 1] - our \"memory\" of what we picked for each hidden dim in the 3 states\n",
47 | " out = tf.matmul(outputs, W_repeated) + self.b_out # [batch_size, max_time, cell.output_size] so [3, 3, 10] * [30, 1, 1] + [3]\n",
48 | " out = tf.squeeze(out) # removes all size 1 dimensions, so takes us down from \n",
49 | " return out\n",
50 | " \n",
51 | " def train(self, train_x, train_y):\n",
52 | " with tf.Session() as sess:\n",
53 | " tf.get_variable_scope().reuse_variables()\n",
54 | " sess.run(tf.global_variables_initializer())\n",
55 | " for i in range(1000):\n",
56 | " _, mse = sess.run([self.train_op, self.cost], feed_dict={self.x: train_x, self.y: train_y})\n",
57 | " if i % 100 == 0:\n",
58 | " print(i, mse)\n",
59 | " save_path = self.saver.save(sess, '../models/ch16-model.ckpt')\n",
60 | " print('Model saved to {}'.format(save_path))\n",
61 | " \n",
62 | " def test(self, test_x):\n",
63 | " with tf.Session() as sess:\n",
64 | " tf.get_variable_scope().reuse_variables()\n",
65 | " self.saver.restore(sess, '../models/ch16-model.ckpt')\n",
66 | " output = sess.run(self.model(), feed_dict={self.x: test_x})\n",
67 | " print(output)"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": 3,
73 | "metadata": {},
74 | "outputs": [
75 | {
76 | "name": "stderr",
77 | "output_type": "stream",
78 | "text": [
79 | "WARNING: Logging before flag parsing goes to stderr.\n",
80 | "W0802 10:33:08.541146 4554970560 deprecation.py:323] From :22: BasicLSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.\n",
81 | "Instructions for updating:\n",
82 | "This class is equivalent as tf.keras.layers.LSTMCell, and will be replaced by that in Tensorflow 2.0.\n",
83 | "W0802 10:33:08.542406 4554970560 deprecation.py:323] From :23: dynamic_rnn (from tensorflow.python.ops.rnn) is deprecated and will be removed in a future version.\n",
84 | "Instructions for updating:\n",
85 | "Please use `keras.layers.RNN(cell)`, which is equivalent to this API\n",
86 | "W0802 10:33:08.592143 4554970560 deprecation.py:323] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/ops/rnn_cell_impl.py:735: Layer.add_variable (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\n",
87 | "Instructions for updating:\n",
88 | "Please use `layer.add_weight` method instead.\n",
89 | "W0802 10:33:08.598670 4554970560 deprecation.py:506] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/ops/rnn_cell_impl.py:739: calling Zeros.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
90 | "Instructions for updating:\n",
91 | "Call initializer instance with the dtype argument instead of passing it to the constructor\n"
92 | ]
93 | },
94 | {
95 | "name": "stdout",
96 | "output_type": "stream",
97 | "text": [
98 | "Shape (?, 4, 1)\n",
99 | "Num Ex: ?\n",
100 | "0 85.54771\n",
101 | "100 59.58133\n",
102 | "200 34.74611\n",
103 | "300 22.044367\n",
104 | "400 11.355663\n",
105 | "500 8.089295\n",
106 | "600 6.232077\n",
107 | "700 4.643535\n",
108 | "800 3.5185144\n",
109 | "900 2.7564201\n",
110 | "Model saved to ../models/ch16-model.ckpt\n",
111 | "Shape (?, 4, 1)\n",
112 | "Num Ex: ?\n",
113 | "[[ 1.0581387 2.769493 5.3942537 7.799148 ]\n",
114 | " [ 4.250075 9.052462 11.027655 11.472687 ]]\n"
115 | ]
116 | }
117 | ],
118 | "source": [
119 | "predictor = SeriesPredictor(input_dim=1, seq_size=4, hidden_dim=10)\n",
120 | "train_x = [[[1], [2], [5], [6]],\n",
121 | " [[5], [7], [7], [8]],\n",
122 | " [[3], [4], [5], [7]]]\n",
123 | "train_y = [[1, 3, 7, 11],\n",
124 | " [5, 12, 14, 15],\n",
125 | " [3, 7, 9, 12]]\n",
126 | "predictor.train(train_x, train_y)\n",
127 | "test_x = [[[1], [2], [3], [4]],\n",
128 | " [[4], [5], [6], [7]]]\n",
129 | "predictor.test(test_x)"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": []
138 | }
139 | ],
140 | "metadata": {
141 | "kernelspec": {
142 | "display_name": "Python 3",
143 | "language": "python",
144 | "name": "python3"
145 | },
146 | "language_info": {
147 | "codemirror_mode": {
148 | "name": "ipython",
149 | "version": 3
150 | },
151 | "file_extension": ".py",
152 | "mimetype": "text/x-python",
153 | "name": "python",
154 | "nbconvert_exporter": "python",
155 | "pygments_lexer": "ipython3",
156 | "version": "3.7.0b1"
157 | }
158 | },
159 | "nbformat": 4,
160 | "nbformat_minor": 2
161 | }
162 |
--------------------------------------------------------------------------------
/ch17/TF Datasets and LibriSpeech.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf\n",
10 | "import glob\n",
11 | "from tensorflow.data.experimental import AUTOTUNE "
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "speech_data_path = \"../data/LibriSpeech\"\n",
21 | "train_path = speech_data_path + \"/train-clean-100\"\n",
22 | "dev_path = speech_data_path + \"/dev-clean\"\n",
23 | "test_path = speech_data_path + \"/test-clean\"\n"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "train_audio_wav = [file for file in glob.glob(train_path + \"/*/*/*.wav\")]"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "print(len(train_audio_wav))"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {},
48 | "outputs": [],
49 | "source": [
50 | "BATCH_SIZE=10\n",
51 | "train_size=100\n",
52 | "train_audio_ds = tf.data.Dataset.from_tensor_slices(train_audio_wav[0:train_size])\n",
53 | "train_audio_ds = train_audio_ds.batch(25)\n",
54 | "train_audio_ds = train_audio_ds.shuffle(buffer_size=train_size)\n",
55 | "train_audio_ds = train_audio_ds.prefetch(buffer_size=AUTOTUNE)\n",
56 | "num_epochs = 2"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "with tf.Session() as sess:\n",
66 | " sess.run(tf.global_variables_initializer())\n",
67 | " \n",
68 | " for epoch in range(0, num_epochs):\n",
69 | " iter = train_audio_ds.make_one_shot_iterator()\n",
70 | " batch_num = 0\n",
71 | " iter_op = iter.get_next()\n",
72 | "\n",
73 | " while True:\n",
74 | " try:\n",
75 | " train_batch = sess.run(iter_op)\n",
76 | " print(train_batch)\n",
77 | " batch_num += 1\n",
78 | " print('Batch Num %d ' % (batch_num))\n",
79 | " except tf.errors.OutOfRangeError:\n",
80 | " print('Epoch %d ' % (epoch))\n",
81 | " break"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": null,
87 | "metadata": {},
88 | "outputs": [],
89 | "source": []
90 | }
91 | ],
92 | "metadata": {
93 | "kernelspec": {
94 | "display_name": "Python 3",
95 | "language": "python",
96 | "name": "python3"
97 | },
98 | "language_info": {
99 | "codemirror_mode": {
100 | "name": "ipython",
101 | "version": 3
102 | },
103 | "file_extension": ".py",
104 | "mimetype": "text/x-python",
105 | "name": "python",
106 | "nbconvert_exporter": "python",
107 | "pygments_lexer": "ipython3",
108 | "version": "3.6.9"
109 | }
110 | },
111 | "nbformat": 4,
112 | "nbformat_minor": 2
113 | }
114 |
--------------------------------------------------------------------------------
/ch18/Listing 18.01 - 18.04.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "input_dim = 1\n",
19 | "seq_size = 6\n",
20 | "input_placeholder = tf.placeholder(dtype=tf.float32, \n",
21 | " shape=[None, seq_size, input_dim])"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": 3,
27 | "metadata": {},
28 | "outputs": [
29 | {
30 | "name": "stderr",
31 | "output_type": "stream",
32 | "text": [
33 | "WARNING: Logging before flag parsing goes to stderr.\n",
34 | "W0802 10:39:26.192169 4642076096 lazy_loader.py:50] \n",
35 | "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
36 | "For more information, please see:\n",
37 | " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
38 | " * https://github.com/tensorflow/addons\n",
39 | " * https://github.com/tensorflow/io (for I/O related ops)\n",
40 | "If you depend on functionality not listed there, please file an issue.\n",
41 | "\n",
42 | "W0802 10:39:27.169586 4642076096 deprecation.py:323] From :2: LSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.\n",
43 | "Instructions for updating:\n",
44 | "This class is equivalent as tf.keras.layers.LSTMCell, and will be replaced by that in Tensorflow 2.0.\n",
45 | "W0802 10:39:27.170794 4642076096 deprecation.py:323] From :8: dynamic_rnn (from tensorflow.python.ops.rnn) is deprecated and will be removed in a future version.\n",
46 | "Instructions for updating:\n",
47 | "Please use `keras.layers.RNN(cell)`, which is equivalent to this API\n",
48 | "W0802 10:39:27.244272 4642076096 deprecation.py:323] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/ops/rnn_cell_impl.py:958: Layer.add_variable (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\n",
49 | "Instructions for updating:\n",
50 | "Please use `layer.add_weight` method instead.\n",
51 | "W0802 10:39:27.256756 4642076096 deprecation.py:506] From /Users/mattmann/git/buildout.python/python-3.7/lib/python3.7/site-packages/tensorflow_core/python/ops/rnn_cell_impl.py:962: calling Zeros.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
52 | "Instructions for updating:\n",
53 | "Call initializer instance with the dtype argument instead of passing it to the constructor\n"
54 | ]
55 | }
56 | ],
57 | "source": [
58 | "def make_cell(state_dim):\n",
59 | " return tf.contrib.rnn.LSTMCell(state_dim)\n",
60 | "\n",
61 | "with tf.variable_scope(\"first_cell\") as scope:\n",
62 | " cell = make_cell(state_dim=10)\n",
63 | " outputs, states = tf.nn.dynamic_rnn(cell,\n",
64 | " input_placeholder,\n",
65 | " dtype=tf.float32)"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 4,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "with tf.variable_scope(\"second_cell\") as scope:\n",
75 | " cell2 = make_cell(state_dim=10)\n",
76 | " outputs2, states2 = tf.nn.dynamic_rnn(cell2,\n",
77 | " outputs,\n",
78 | " dtype=tf.float32)"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 5,
84 | "metadata": {},
85 | "outputs": [
86 | {
87 | "name": "stderr",
88 | "output_type": "stream",
89 | "text": [
90 | "W0802 10:39:27.404192 4642076096 deprecation.py:323] From :3: MultiRNNCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.\n",
91 | "Instructions for updating:\n",
92 | "This class is equivalent as tf.keras.layers.StackedRNNCells, and will be replaced by that in Tensorflow 2.0.\n"
93 | ]
94 | }
95 | ],
96 | "source": [
97 | "def make_multi_cell(state_dim, num_layers):\n",
98 | " cells = [make_cell(state_dim) for _ in range(num_layers)]\n",
99 | " return tf.contrib.rnn.MultiRNNCell(cells)\n",
100 | "\n",
101 | "multi_cell = make_multi_cell(state_dim=10, num_layers=4)\n",
102 | "outputs4, states4 = tf.nn.dynamic_rnn(multi_cell, \n",
103 | " input_placeholder, \n",
104 | " dtype=tf.float32)"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": 6,
110 | "metadata": {},
111 | "outputs": [
112 | {
113 | "name": "stdout",
114 | "output_type": "stream",
115 | "text": [
116 | "Tensor(\"rnn/transpose_1:0\", shape=(?, 6, 10), dtype=float32)\n",
117 | "(LSTMStateTuple(c=, h=), LSTMStateTuple(c=, h=), LSTMStateTuple(c=, h=), LSTMStateTuple(c=, h=))\n"
118 | ]
119 | }
120 | ],
121 | "source": [
122 | "print(outputs4)\n",
123 | "print(states4)"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": null,
129 | "metadata": {},
130 | "outputs": [],
131 | "source": []
132 | }
133 | ],
134 | "metadata": {
135 | "kernelspec": {
136 | "display_name": "Python 3",
137 | "language": "python",
138 | "name": "python3"
139 | },
140 | "language_info": {
141 | "codemirror_mode": {
142 | "name": "ipython",
143 | "version": 3
144 | },
145 | "file_extension": ".py",
146 | "mimetype": "text/x-python",
147 | "name": "python",
148 | "nbconvert_exporter": "python",
149 | "pygments_lexer": "ipython3",
150 | "version": "3.7.0b1"
151 | }
152 | },
153 | "nbformat": 4,
154 | "nbformat_minor": 2
155 | }
156 |
--------------------------------------------------------------------------------
/ch18/Listing 18.05 - 18.08.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import tensorflow as tf"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "embeddings_0d = tf.constant([17, 22, 35, 51])"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "embeddings_4d = tf.constant([[1, 0, 0, 0],\n",
28 | " [0, 1, 0, 0],\n",
29 | " [0, 0, 1, 0],\n",
30 | " [0, 0, 0, 1]])\n"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 4,
36 | "metadata": {},
37 | "outputs": [],
38 | "source": [
39 | "embeddings_2x2d = tf.constant([[[1, 0], [0, 0]],\n",
40 | " [[0, 1], [0, 0]],\n",
41 | " [[0, 0], [1, 0]],\n",
42 | " [[0, 0], [0, 1]]])\n"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": 5,
48 | "metadata": {},
49 | "outputs": [],
50 | "source": [
51 | "ids = tf.constant([1, 0, 2])"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 6,
57 | "metadata": {},
58 | "outputs": [
59 | {
60 | "name": "stdout",
61 | "output_type": "stream",
62 | "text": [
63 | "[22 17 35]\n",
64 | "[[0 1 0 0]\n",
65 | " [1 0 0 0]\n",
66 | " [0 0 1 0]]\n",
67 | "[[[0 1]\n",
68 | " [0 0]]\n",
69 | "\n",
70 | " [[1 0]\n",
71 | " [0 0]]\n",
72 | "\n",
73 | " [[0 0]\n",
74 | " [1 0]]]\n"
75 | ]
76 | }
77 | ],
78 | "source": [
79 | "with tf.Session() as sess: \n",
80 | " lookup_0d = sess.run(tf.nn.embedding_lookup(embeddings_0d, ids))\n",
81 | " print(lookup_0d)\n",
82 | "\n",
83 | " lookup_4d = sess.run(tf.nn.embedding_lookup(embeddings_4d, ids))\n",
84 | " print(lookup_4d)\n",
85 | "\n",
86 | " lookup_2x2d = sess.run(tf.nn.embedding_lookup(embeddings_2x2d, ids))\n",
87 | " print(lookup_2x2d)"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": null,
93 | "metadata": {},
94 | "outputs": [],
95 | "source": []
96 | }
97 | ],
98 | "metadata": {
99 | "kernelspec": {
100 | "display_name": "Python 3",
101 | "language": "python",
102 | "name": "python3"
103 | },
104 | "language_info": {
105 | "codemirror_mode": {
106 | "name": "ipython",
107 | "version": 3
108 | },
109 | "file_extension": ".py",
110 | "mimetype": "text/x-python",
111 | "name": "python",
112 | "nbconvert_exporter": "python",
113 | "pygments_lexer": "ipython3",
114 | "version": "3.7.0b1"
115 | }
116 | },
117 | "nbformat": 4,
118 | "nbformat_minor": 2
119 | }
120 |
--------------------------------------------------------------------------------
/data/delete.txt:
--------------------------------------------------------------------------------
1 | Temporary file.
2 |
--------------------------------------------------------------------------------
/download-data.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Download every dataset the book's notebooks need, verify each artifact
# against its SHA-256 manifest, and unpack it into ./data (or ./models).
set -e
set -x
mkdir -p data
mkdir -p data/cache

# fetch URL DEST
# Download one file. -f makes curl exit non-zero on an HTTP 4xx/5xx
# instead of saving the error page as the output file; with `set -e`
# the script then aborts immediately on a bad download.
fetch() {
  curl -fL "$1" -o "$2"
}

# fetch_tarball_xz ARCHIVE_URL SHA_URL NAME
# Download NAME and NAME.sha256 into the CWD, verify the checksum,
# extract the .tar.xz into ./data, then remove the archive and manifest.
fetch_tarball_xz() {
  fetch "$1" "$3"
  fetch "$2" "$3.sha256"
  sha256sum -c "$3.sha256" && rm "$3.sha256"
  tar -C data -xJvf "$3" && rm "$3"
}

# fetch_xz_into DIR FILE_URL SHA_URL NAME
# Download DIR/NAME (a single .xz-compressed file) plus its manifest,
# verify inside DIR, then decompress in place (leaving NAME minus .xz).
fetch_xz_into() {
  fetch "$2" "$1/$4"
  fetch "$3" "$1/$4.sha256"
  pushd "$1"
  sha256sum -c "$4.sha256" && rm "$4.sha256"
  unxz "$4"
  popd
}

echo "Downloading Ch4 data..."
fetch_xz_into data \
  "https://www.dropbox.com/s/11331ycu0fmpb5h/311.csv.xz?dl=0" \
  "https://www.dropbox.com/s/offtc9mul4eqbz2/311.csv.xz.sha256?dl=0" \
  311.csv.xz
echo "...done"

echo "Downloading Ch6 data..."
fetch_tarball_xz \
  "https://www.dropbox.com/s/f98keuexrnvc5qo/word2vec-nlp-tutorial.tar.xz?dl=0" \
  "https://www.dropbox.com/s/6b7q40pdfohi8ri/word2vec-nlp-tutorial.tar.xz.sha256?dl=0" \
  word2vec-nlp-tutorial.tar.xz
fetch_tarball_xz \
  "https://www.dropbox.com/s/hbryifzukwa5uh5/aclImdb_v1.tar.xz?dl=0" \
  "https://www.dropbox.com/s/78vf3bkcqu6gn95/aclImdb_v1.tar.xz.sha256?dl=0" \
  aclImdb_v1.tar.xz
echo "...done"

echo "Downloading Ch7 data..."
fetch_tarball_xz \
  "https://www.dropbox.com/s/qenizussmpf6o90/audio_dataset.tar.xz?dl=0" \
  "https://www.dropbox.com/s/zfqnrjmqd85oynn/audio_dataset.tar.xz.sha256?dl=0" \
  audio_dataset.tar.xz
fetch_xz_into data \
  "https://www.dropbox.com/s/uuv5nk6hqx5yv5n/TalkingMachinesPodcast.wav.xz?dl=0" \
  "https://www.dropbox.com/s/v07jp2js999ciyi/TalkingMachinesPodcast.wav.xz.sha256?dl=0" \
  TalkingMachinesPodcast.wav.xz
echo "...done"

echo "Downloading Ch8 data..."
fetch_tarball_xz \
  "https://www.dropbox.com/s/um35z0jc338mi4p/user-identification-from-walking-activity.tar.xz?dl=0" \
  "https://www.dropbox.com/s/shdaavem2q47laj/user-identification-from-walking-activity.tar.xz.sha256?dl=0" \
  user-identification-from-walking-activity.tar.xz
echo "...done"

echo "Downloading Ch 10 data..."
fetch_xz_into data \
  "https://www.dropbox.com/s/lfmbvahvak0a1yr/mobypos.txt.xz?dl=0" \
  "https://www.dropbox.com/s/vyzqx1fn71jhahz/mobypos.txt.xz.sha256?dl=0" \
  mobypos.txt.xz
echo "...done"

echo "Downloading Ch 12 and Ch14 and Ch15 (CIFAR-10) data..."
fetch_tarball_xz \
  "https://www.dropbox.com/s/58ffbllifzt1n8p/cifar-10-python.tar.xz?dl=0" \
  "https://www.dropbox.com/s/hjdpljhwn2rlb64/cifar-10-python.tar.xz.sha256?dl=0" \
  cifar-10-python.tar.xz
echo "...done"

echo "Downloading Ch15 data (VGG Face)..."
fetch_tarball_xz \
  "https://www.dropbox.com/s/nut408obn99la12/vgg_face_dataset.tar.xz?dl=0" \
  "https://www.dropbox.com/s/9hspp5mr8qpbata/vgg_face_dataset.tar.xz.sha256?dl=0" \
  vgg_face_dataset.tar.xz
fetch_tarball_xz \
  "https://www.dropbox.com/s/5nv0pw367wlfkrc/vgg_face-small.tar.xz?dl=0" \
  "https://www.dropbox.com/s/rbj6m8gf4xjebb0/vgg_face-small.tar.xz.sha256?dl=0" \
  vgg_face-small.tar.xz
fetch_xz_into data \
  "https://www.dropbox.com/s/tyukhq0r3cgk5xx/vgg_face_full_urls.csv.xz?dl=0" \
  "https://www.dropbox.com/s/ea503l12x02wtse/vgg_face_full_urls.csv.xz.sha256?dl=0" \
  vgg_face_full_urls.csv.xz
fetch_xz_into data \
  "https://www.dropbox.com/s/i6wncx0fs1k51nf/vgg_face_full.csv.xz?dl=0" \
  "https://www.dropbox.com/s/kapt1u0dcrxmmta/vgg_face_full.csv.xz.sha256?dl=0" \
  vgg_face_full.csv.xz
fetch_tarball_xz \
  "https://www.dropbox.com/s/tl27ja252tpi9sy/vgg-models.tar.xz?dl=0" \
  "https://www.dropbox.com/s/bfmr80r7jaq0v88/vgg-models.tar.xz.sha256?dl=0" \
  vgg-models.tar.xz
# The weights belong in the repo's top-level ./models directory.
fetch_xz_into models \
  "https://www.dropbox.com/s/uinp84jar0v9rtx/vgg_face_weights.h5.xz?dl=0" \
  "https://www.dropbox.com/s/fkw89nbrkwcjs7q/vgg_face_weights.h5.xz.sha256?dl=0" \
  vgg_face_weights.h5.xz
echo "...done"

echo "Downloading Ch16 data..."
# Plain CSV (not xz-compressed): download and verify only.
fetch "https://www.dropbox.com/s/8a207h2klogtpep/international-airline-passengers.csv?dl=0" data/international-airline-passengers.csv
fetch "https://www.dropbox.com/s/j41jldmcglrsgbo/international-airline-passengers.csv.sha256?dl=0" data/international-airline-passengers.csv.sha256
pushd data
sha256sum -c international-airline-passengers.csv.sha256 && rm international-airline-passengers.csv.sha256
popd
echo "...done"

echo "Downloading Ch17 data..."
# LibriSpeech is split into 17 chunks (partaa..partaq); download each,
# verify, then stream the concatenation through tar.
libri_part_urls=(
  "https://www.dropbox.com/s/hr28xhat69kmu8j/LibriSpeech.tar.bz2.partaa?dl=0"
  "https://www.dropbox.com/s/namsr3tacyxctyj/LibriSpeech.tar.bz2.partab?dl=0"
  "https://www.dropbox.com/s/x2hjw2p4jay08nz/LibriSpeech.tar.bz2.partac?dl=0"
  "https://www.dropbox.com/s/jy70yoqmpzqoi05/LibriSpeech.tar.bz2.partad?dl=0"
  "https://www.dropbox.com/s/qwzj2d4cuovohz1/LibriSpeech.tar.bz2.partae?dl=0"
  "https://www.dropbox.com/s/bndq8zp4udhhd6g/LibriSpeech.tar.bz2.partaf?dl=0"
  "https://www.dropbox.com/s/ar0qx5rjoto5iow/LibriSpeech.tar.bz2.partag?dl=0"
  "https://www.dropbox.com/s/fhcen1irrtez1v7/LibriSpeech.tar.bz2.partah?dl=0"
  "https://www.dropbox.com/s/5fydv42rcelwt9j/LibriSpeech.tar.bz2.partai?dl=0"
  "https://www.dropbox.com/s/dt3v1x2pkbulzgx/LibriSpeech.tar.bz2.partaj?dl=0"
  "https://www.dropbox.com/s/2fe9a8g8fmjd2ao/LibriSpeech.tar.bz2.partak?dl=0"
  "https://www.dropbox.com/s/362rllp8fy3xvzb/LibriSpeech.tar.bz2.partal?dl=0"
  "https://www.dropbox.com/s/kxoow98bdqblswq/LibriSpeech.tar.bz2.partam?dl=0"
  "https://www.dropbox.com/s/u7s8itp8ocyqodn/LibriSpeech.tar.bz2.partan?dl=0"
  "https://www.dropbox.com/s/5u5g6bm3cruhiep/LibriSpeech.tar.bz2.partao?dl=0"
  "https://www.dropbox.com/s/6e5njq4x5756ttx/LibriSpeech.tar.bz2.partap?dl=0"
  "https://www.dropbox.com/s/pdxp9jb27xz4wpx/LibriSpeech.tar.bz2.partaq?dl=0"
)
for url in "${libri_part_urls[@]}"; do
  name="${url##*/}"        # e.g. LibriSpeech.tar.bz2.partaa?dl=0
  fetch "$url" "${name%%\?*}"
done
fetch "https://www.dropbox.com/s/92jqmcyu412nbcu/LibriSpeech.tar.bz2.sha256?dl=0" LibriSpeech.tar.bz2.sha256
# NOTE(review): this manifest presumably lists the individual part files;
# if it named only the joined LibriSpeech.tar.bz2 the check (and, via
# `set -e`, the whole script) would fail here -- confirm its contents.
sha256sum -c LibriSpeech.tar.bz2.sha256 && rm LibriSpeech.tar.bz2.sha256
# NOTE(review): lbunzip2 (parallel bunzip2) must be on PATH -- confirm the
# runtime environment provides it.
cat LibriSpeech.tar.bz2.parta* | tar -C data --use-compress-program lbunzip2 -xvf -
rm -f LibriSpeech.tar.bz2.parta*
echo "...done"

echo "Downloading Ch18 data..."
fetch_tarball_xz \
  "https://www.dropbox.com/s/gmhhij7uand8e42/seq2seq.tar.xz?dl=0" \
  "https://www.dropbox.com/s/1oxf6etpff8fsin/seq2seq.tar.xz.sha256?dl=0" \
  seq2seq.tar.xz
echo "...done"

echo "Downloading Ch19 data..."
fetch_tarball_xz \
  "https://www.dropbox.com/s/66uhiglbibstrsd/cloth_folding_rgb_vids.tar.xz?dl=0" \
  "https://www.dropbox.com/s/siufwdahl5g6muo/cloth_folding_rgb_vids.tar.xz.sha256?dl=0" \
  cloth_folding_rgb_vids.tar.xz
echo "...done"

--------------------------------------------------------------------------------
/download-libs.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Download third-party libraries and model assets the notebooks import.
# Fail fast: abort on the first failed command (`set -e`, matching
# download-data.sh), and make curl treat HTTP 4xx/5xx responses as
# errors (-f) instead of saving the error page as the output file.
set -e

echo "Downloading Ch7 libraries..."
mkdir -p libs/BregmanToolkit/
curl -fL "https://www.dropbox.com/sh/dkud00y61fq3ou7/AABYAwHiDmisx13rOAbvQzOWa?dl=0" -o libs/BregmanToolkit/BregmanToolkit.zip
pushd libs/BregmanToolkit/
unzip BregmanToolkit.zip && rm BregmanToolkit.zip
popd
echo "...done"

echo "Downloading Ch15 libraries..."
mkdir -p libs   # in case this section is run standalone
curl -fL "https://www.dropbox.com/s/ljgs3349l914xhr/haarcascade_frontalface_default.xml?dl=0" -o libs/haarcascade_frontalface_default.xml
echo "...done"

echo "Downloading Ch17 libraries..."
mkdir -p libs/basic_units/
curl -fL "https://www.dropbox.com/s/bqmzpyfzz8eifna/basic_units.py?dl=0" -o libs/basic_units/basic_units.py
pushd libs
curl -fL "https://github.com/mrubash1/RNN-Tutorial/archive/master.zip" -o RNN-Tutorial.zip
unzip RNN-Tutorial.zip
mv RNN-Tutorial-master RNN-Tutorial && rm -rf RNN-Tutorial.zip
popd
echo "...done"

echo "Downloading Ch19 libraries..."
mkdir -p libs/vgg16
pushd libs/vgg16
# -O keeps each remote filename as the local name.
curl -fLO "https://www.cs.toronto.edu/~frossard/vgg16/vgg16_weights.npz"
curl -fLO "https://www.cs.toronto.edu/~frossard/vgg16/vgg16.py"
curl -fLO "https://www.cs.toronto.edu/~frossard/vgg16/imagenet_classes.py"
curl -fLO "https://www.cs.toronto.edu/~frossard/vgg16/laska.png"
popd
echo "...done"

--------------------------------------------------------------------------------
/figs/Figures.graffle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/Figures.graffle
--------------------------------------------------------------------------------
/figs/Figures2.graffle/data.plist:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/Figures2.graffle/data.plist
--------------------------------------------------------------------------------
/figs/Figures2.graffle/image1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/Figures2.graffle/image1.png
--------------------------------------------------------------------------------
/figs/Figures2.graffle/image2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/Figures2.graffle/image2.png
--------------------------------------------------------------------------------
/figs/Figures2.graffle/image3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/Figures2.graffle/image3.png
--------------------------------------------------------------------------------
/figs/Figures2.graffle/image4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/Figures2.graffle/image4.png
--------------------------------------------------------------------------------
/figs/Figures2.graffle/image5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/Figures2.graffle/image5.png
--------------------------------------------------------------------------------
/figs/android_pos_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/android_pos_model.png
--------------------------------------------------------------------------------
/figs/ch06-final-bow-model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/ch06-final-bow-model.png
--------------------------------------------------------------------------------
/figs/ch10-apertium-p1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/ch10-apertium-p1.png
--------------------------------------------------------------------------------
/figs/ch10-apertium-p2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/ch10-apertium-p2.png
--------------------------------------------------------------------------------
/figs/ch12-autoencoder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/ch12-autoencoder.png
--------------------------------------------------------------------------------
/figs/ch13-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/ch13-network.png
--------------------------------------------------------------------------------
/figs/ch13-q-function.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/ch13-q-function.png
--------------------------------------------------------------------------------
/figs/ch15-cnn-arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/ch15-cnn-arch.png
--------------------------------------------------------------------------------
/figs/mltf2-cover.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chrismattmann/MLwithTensorFlow2ed/479f74e54c42a231b058472407e82b37c61dac88/figs/mltf2-cover.jpeg
--------------------------------------------------------------------------------
/mltf-entrypoint.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -e

# Register a Python 2 kernel with Jupyter when the image ships a Py2
# interpreter at this fixed path (skipped otherwise).
PY2_BIN=/usr/install/python27/bin/python
if [[ -f "$PY2_BIN" ]]; then
    "$PY2_BIN" -m ipykernel install --user
fi

# Run Jupyter in the foreground as the container's main process, bound to
# all interfaces (the host-side publish rule restricts outside access).
jupyter notebook --notebook-dir=/usr/src/mltf2 --no-browser --ip 0.0.0.0 --allow-root

--------------------------------------------------------------------------------
/models/delete.txt:
--------------------------------------------------------------------------------
1 | Temporary file.
2 |
--------------------------------------------------------------------------------
/requirements-gpu-py2.txt:
--------------------------------------------------------------------------------
1 | https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.14.0-cp27-none-linux_x86_64.whl; sys.platform == "linux2"
2 | jupyter
3 | pandas
4 | pandas_datareader
5 | matplotlib==2.0.2
6 | nltk
7 | tqdm
8 | scikit-learn
9 | scikit-image
10 | tika
11 | ystockquote
12 | yfinance
13 | requests
14 | opencv-python==4.0.1.24
15 | pydub
16 | pillow
17 |
--------------------------------------------------------------------------------
/requirements-py2.txt:
--------------------------------------------------------------------------------
1 | jupyter==1.0.0
2 | pandas==0.24.2
3 | pandas-datareader==0.8.1
4 | matplotlib==2.0.2
5 | nltk==3.5
6 | tqdm==4.48.2
7 | scikit-learn
8 | scikit-image==0.14.5
9 | tika==1.24
10 | ystockquote==0.2.5
11 | yfinance==0.1.54
12 | requests==2.24.0
13 | opencv-python==4.0.1.24
14 | pydub==0.24.1
15 | Pillow==7.1.0
16 |
--------------------------------------------------------------------------------
/requirements-tf2.txt:
--------------------------------------------------------------------------------
1 | absl-py~=0.10.0
2 | appdirs~=1.4.4
3 | argon2-cffi~=20.1.0
4 | astunparse~=1.6.3
5 | attrs~=20.1.0
6 | audioread~=2.1.8
7 | backcall~=0.2.0
8 | bleach~=3.1.5
9 | cachetools~=4.1.1
10 | certifi~=2020.6.20
11 | cffi~=1.14.2
12 | chardet~=3.0.4
13 | click~=7.1.2
14 | cloudpickle~=1.6.0
15 | cycler~=0.10.0
16 | decorator~=4.4.2
17 | deepspeech~=0.8.0
18 | defusedxml~=0.6.0
19 | dill~=0.3.2
20 | entrypoints~=0.3
21 | future~=0.18.2
22 | gast~=0.3.3
23 | google-auth~=1.21.0
24 | google-auth-oauthlib~=0.4.1
25 | google-pasta~=0.2.0
26 | googleapis-common-protos~=1.52.0
27 | grpcio~=1.31.0
28 | h5py~=2.10.0
29 | horovod~=0.20.0
30 | idna~=2.10
31 | imageio~=2.9.0
32 | jedi~=0.17.2
33 | Jinja2~=2.11.2
34 | joblib~=0.16.0
35 | jsonschema~=3.2.0
36 | Keras~=2.4.3
37 | Keras-Preprocessing~=1.1.2
38 | kiwisolver~=1.2.0
39 | librosa~=0.8.0
40 | llvmlite~=0.34.0
41 | lxml~=4.5.2
42 | Markdown~=3.2.2
43 | MarkupSafe~=1.1.1
44 | matplotlib~=3.3.1
45 | mistune~=0.8.4
46 | multitasking~=0.0.9
47 | nbconvert~=5.6.1
48 | nbformat~=5.0.7
49 | networkx~=2.5
50 | nltk~=3.5
51 | notebook~=6.1.3
52 | numba~=0.51.2
53 | numpy~=1.17.3
54 | oauthlib~=3.1.0
55 | opencv-python~=4.4.0.42
56 | opt-einsum~=3.3.0
57 | packaging~=20.4
58 | pandas~=1.1.2
59 | pandas-datareader~=0.9.0
60 | pandocfilters~=1.4.2
61 | parso~=0.7.1
62 | pexpect~=4.8.0
63 | pickleshare~=0.7.5
64 | Pillow~=7.1.0
65 | pooch~=1.2.0
66 | prometheus-client~=0.8.0
67 | promise~=2.3
68 | prompt-toolkit~=3.0.7
69 | protobuf~=3.13.0
70 | psutil~=5.7.2
71 | ptyprocess~=0.6.0
72 | pyasn1~=0.4.8
73 | pyasn1-modules~=0.2.8
74 | pycparser~=2.20
75 | Pygments~=2.6.1
76 | pyparsing~=2.4.7
77 | PyQt5~=5.15.0
78 | PyQt5-sip~=12.8.1
79 | pyrsistent~=0.16.0
80 | python-dateutil~=2.8.1
81 | python-speech-features~=0.6
82 | pytz~=2020.1
83 | PyWavelets~=1.1.1
84 | PyYAML~=5.3.1
85 | pyzmq~=19.0.2
86 | qtconsole~=4.7.6
87 | QtPy~=1.9.0
88 | regex~=2020.7.14
89 | requests~=2.24.0
90 | requests-oauthlib~=1.3.0
91 | resampy~=0.2.2
92 | rsa~=4.6
93 | scikit-image~=0.17.2
94 | scikit-learn~=0.23.2
95 | scipy~=1.4.1
96 | seaborn~=0.11.0
97 | Send2Trash~=1.5.0
98 | six~=1.15.0
99 | SoundFile~=0.10.3.post1
100 | tensorboard~=2.3.0
101 | tensorboard-plugin-wit~=1.7.0
102 | tensorflow-addons~=0.11.2
103 | tensorflow-datasets~=3.2.1
104 | tensorflow-estimator~=2.3.0
105 | tensorflow-metadata~=0.24.0
106 | termcolor~=1.1.0
107 | terminado~=0.8.3
108 | testpath~=0.4.4
109 | tf-slim~=1.1.0
110 | threadpoolctl~=2.1.0
111 | tifffile~=2020.9.3
112 | tika~=1.24
113 | tornado~=6.0.4
114 | tqdm~=4.48.2
115 | typeguard~=2.9.1
116 | urllib3~=1.25.10
117 | wcwidth~=0.2.5
118 | webencodings~=0.5.1
119 | webrtcvad~=2.0.10
120 | Werkzeug~=1.0.1
121 | widgetsnbextension~=3.5.1
122 | wrapt~=1.12.1
123 | yfinance~=0.1.54
124 | ystockquote~=0.2.5
125 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pandas==1.1.0
2 | pandas_datareader==0.9.0
3 | matplotlib==3.0.2
4 | nltk==3.5
5 | tqdm==4.48.2
6 | scikit-learn
7 | scikit-image==0.17.2
8 | tika==1.24
9 | ystockquote==0.2.5
10 | yfinance==0.1.54
11 | requests==2.24.0
12 | opencv-python==4.0.1.24
13 | pydub==0.24.1
14 | horovod~=0.18.2
15 | python-speech-features==0.6
16 | keras==2.2.4
--------------------------------------------------------------------------------
/run_TFv2_environment.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Launch the TensorFlow 2 Docker environment with Jupyter published only
# on 127.0.0.1:8888 (the old TODO about restricting to localhost is
# already satisfied by the bind address below).
# GPU passthrough is enabled when an NVIDIA device node exists (Linux only).
# -it is added for consistency with run_environment.sh so the container is
# interactive and Ctrl-C reaches Jupyter.
if [[ "$(uname)" == "Linux" && -c /dev/nvidia0 ]]; then
    docker run --gpus all -p 127.0.0.1:8888:8888 --rm -it chrismattmann/mltf2:tf2
else
    docker run -p 127.0.0.1:8888:8888 --rm -it chrismattmann/mltf2:tf2
fi

--------------------------------------------------------------------------------
/run_environment.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Launch the book's Docker environment with Jupyter published only on
# 127.0.0.1:8888. GPU passthrough is enabled when an NVIDIA device node
# exists (Linux only); otherwise the same image runs CPU-only.

GPU_FLAGS=()
if [[ "$(uname)" == "Linux" && -c /dev/nvidia0 ]]; then
    GPU_FLAGS=(--gpus all)
fi

docker run "${GPU_FLAGS[@]}" -p 127.0.0.1:8888:8888 --rm -it chrismattmann/mltf2:latest

--------------------------------------------------------------------------------