├── .gitattributes ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.rst ├── LICENSE ├── README.rst ├── _img ├── 0-welcome │ └── graph-run.png ├── 1-basics │ ├── basic_math_operations │ │ └── graph-run.png │ └── readme.rst ├── 2-basics_in_machine_learning │ └── linear_regression │ │ └── updating_model.gif ├── 3-neural_network │ ├── convolutiona_neural_network │ │ ├── accuracy_train.png │ │ ├── activation_fc4_train.png │ │ ├── architecture.png │ │ ├── classifier_image.png │ │ ├── convlayer.png │ │ ├── graph.png │ │ ├── histogram_fc4_train.png │ │ ├── loss_accuracy_train.png │ │ ├── loss_train.png │ │ ├── terminal_training.png │ │ └── test_accuracy.png │ └── multi-layer-perceptron │ │ └── neural-network.png └── mainpage │ ├── CNNs.png │ ├── TensorFlow_World.gif │ ├── Tensor_GIF.gif │ ├── Tensor_GIF_ff.gif │ ├── basicmodels.gif │ ├── basicmodels.png │ ├── basics-old.png │ ├── basics.gif │ ├── installation-logo.gif │ ├── installation.gif │ ├── welcome.gif │ └── welcome.png ├── codes ├── ipython │ ├── 0-welcome │ │ ├── README.rst │ │ └── code │ │ │ └── 0-welcome.ipynb │ ├── 1-basics │ │ ├── basic_math_operations │ │ │ ├── README.rst │ │ │ └── code │ │ │ │ └── basic_math_operation.ipynb │ │ ├── readme.rst │ │ └── variables │ │ │ ├── README.rst │ │ │ └── code │ │ │ └── variables.ipynb │ ├── 2-basics_in_machine_learning │ │ ├── linear_regression │ │ │ ├── README.rst │ │ │ ├── code │ │ │ │ └── linear_regression.ipynb │ │ │ └── updating_model.gif │ │ ├── linear_svm │ │ │ ├── README.rst │ │ │ └── code │ │ │ │ └── linear_svm.ipynb │ │ ├── logistic_regression │ │ │ ├── README.rst │ │ │ └── code │ │ │ │ └── logistic_regression.ipynb │ │ └── multiclass_svm │ │ │ ├── README.rst │ │ │ └── code │ │ │ └── multiclass_svm.ipynb │ └── 3-neural_networks │ │ ├── convolutional-neural-network │ │ ├── code │ │ │ ├── __init__.py │ │ │ ├── auxiliary │ │ │ │ ├── __init__.py │ │ │ │ ├── progress_bar.py │ │ │ │ └── progress_bar.py~ │ │ │ ├── evaluation.sh │ │ │ ├── input_function │ │ │ │ ├── __init__.py │ │ │ │ └── input.py │ │ │ ├── net_structure │ │ │ │ ├── __init__.py │ │ │ │ └── net.py │ │ │ ├── script │ │ │ │ ├── evaluation.sh │ │ │ │ └── train.sh │ │ │ ├── test_classifier.py │ │ │ ├── train.sh │ │ │ ├── train_classifier.py │ │ │ └── train_evaluation.py │ │ ├── readme.rst │ │ ├── readme.rst~ │ │ └── requirements.txt │ │ ├── multi-layer-perceptron │ │ ├── code │ │ │ ├── evaluation.sh │ │ │ ├── test_classifier.py │ │ │ ├── train.sh │ │ │ ├── train_mlp.ipynb │ │ │ └── train_mlp.py │ │ ├── readme.rst │ │ └── requirements.txt │ │ ├── readme.md │ │ └── recurrent-neural-networks │ │ ├── README.rst │ │ └── code │ │ ├── MNIST_data │ │ ├── t10k-images-idx3-ubyte.gz │ │ ├── t10k-labels-idx1-ubyte.gz │ │ ├── train-images-idx3-ubyte.gz │ │ └── train-labels-idx1-ubyte.gz │ │ └── rnn.ipynb └── python │ ├── 0-welcome │ ├── README.rst │ └── code │ │ ├── 0-welcome.py │ │ └── TensorFlow_Test.py │ ├── 1-basics │ ├── basic_math_operations │ │ ├── README.rst │ │ └── code │ │ │ └── basic_math_operation.py │ ├── readme.rst │ └── variables │ │ ├── README.rst │ │ └── code │ │ └── variables.py │ ├── 2-basics_in_machine_learning │ ├── linear_regression │ │ ├── README.rst │ │ ├── code │ │ │ └── linear_regression.py │ │ └── updating_model.gif │ ├── linear_svm │ │ ├── README.rst │ │ └── code │ │ │ └── linear_svm.py │ ├── logistic_regression │ │ ├── README.rst │ │ └── code │ │ │ └── logistic_regression.py │ └── multiclass_svm │ │ ├── README.rst │ │ └── code │ │ └── multiclass_svm.py │ └── 3-neural_networks │ ├── convolutional-neural-network │ ├── code │ │ 
├── __init__.py │ │ ├── auxiliary │ │ │ ├── __init__.py │ │ │ ├── progress_bar.py │ │ │ └── progress_bar.py~ │ │ ├── evaluation.sh │ │ ├── input_function │ │ │ ├── __init__.py │ │ │ └── input.py │ │ ├── net_structure │ │ │ ├── __init__.py │ │ │ └── net.py │ │ ├── script │ │ │ ├── evaluation.sh │ │ │ └── train.sh │ │ ├── test_classifier.py │ │ ├── train.sh │ │ ├── train_classifier.py │ │ └── train_evaluation.py │ ├── readme.rst │ └── requirements.txt │ ├── multi-layer-perceptron │ ├── code │ │ ├── evaluation.sh │ │ ├── test_classifier.py │ │ ├── train.sh │ │ ├── train_mlp.ipynb │ │ └── train_mlp.py │ ├── readme.rst │ └── requirements.txt │ ├── readme.md │ └── recurrent-neural-networks │ ├── README.rst │ └── code │ ├── MNIST_data │ ├── t10k-images-idx3-ubyte.gz │ ├── t10k-labels-idx1-ubyte.gz │ ├── train-images-idx3-ubyte.gz │ └── train-labels-idx1-ubyte.gz │ └── rnn.py ├── docs ├── Makefile ├── README.rst ├── _img │ ├── 0-welcome │ │ └── graph-run.png │ ├── 1-basics │ │ ├── basic_math_operations │ │ │ └── graph-run.png │ │ └── readme.rst │ ├── 2-basics_in_machine_learning │ │ └── linear_regression │ │ │ └── updating_model.gif │ ├── 3-neural_network │ │ ├── autoencoder │ │ │ ├── README.rst │ │ │ └── ae.png │ │ ├── convolutiona_neural_network │ │ │ ├── accuracy_train.png │ │ │ ├── activation_fc4_train.png │ │ │ ├── architecture.png │ │ │ ├── classifier_image.png │ │ │ ├── convlayer.png │ │ │ ├── graph.png │ │ │ ├── histogram_fc4_train.png │ │ │ ├── loss_accuracy_train.png │ │ │ ├── loss_train.png │ │ │ ├── terminal_training.png │ │ │ └── test_accuracy.png │ │ └── multi-layer-perceptron │ │ │ └── neural-network.png │ └── mainpage │ │ ├── TensorFlow_World.gif │ │ ├── Tensor_GIF.gif │ │ ├── Tensor_GIF_ff.gif │ │ └── installation.gif ├── conf.py ├── index.rst ├── make.bat └── tutorials │ ├── 0-welcome │ └── README.rst │ ├── 1-basics │ ├── basic_math_operations │ │ └── README.rst │ ├── readme.rst │ └── variables │ │ └── README.rst │ ├── 2-basics_in_machine_learning │ ├── linear_regression │ │ └── README.rst │ └── logistic_regression │ │ └── README.rst │ ├── 3-neural_network │ ├── autoencoder │ │ └── README.rst │ └── convolutiona_neural_network │ │ └── README.rst │ └── installation │ └── README.rst ├── requirements.txt └── welcome.py /.gitattributes: -------------------------------------------------------------------------------- 1 | codes/ipython/* linguist-vendored 2 | 3 | # Basic .gitattributes for a python repo. 4 | 5 | # Source files 6 | # ============ 7 | .pxd text diff=python 8 | .py text diff=python 9 | .py3 text diff=python 10 | .pyw text diff=python 11 | .pyx text diff=python 12 | 13 | # Binary files 14 | # ============ 15 | .db binary 16 | .p binary 17 | .pkl binary 18 | .pyc binary 19 | .pyd binary 20 | .pyo binary 21 | 22 | # Note: .db, .p, and .pkl files are associated 23 | # with the python modules ``pickle``, ``dbm.*``, 24 | # ``shelve``, ``marshal``, ``anydbm``, & ``bsddb`` 25 | # (among others). 
26 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at amirsina.torfi@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | 2 | ************* 3 | Contributing 4 | ************* 5 | 6 | When contributing to this repository, please first discuss the change you wish to make via an issue, 7 | email, or any other method with the owners of this repository before making a change. *For typos, please 8 | do not create a pull request. Instead, report them in the issues section or email the repository owner*. 9 | 10 | Please note that we have a code of conduct; please follow it in all your interactions with the project. 11 | 12 | ==================== 13 | Pull Request Process 14 | ==================== 15 | 16 | Please consider the following criteria to help us review your contribution: 17 | 18 | 1. A pull request is mainly expected to suggest or improve a code script. 19 | 2. A pull request related to non-code sections is expected to make a significant difference in the documentation. Otherwise, it should be announced in the issues section instead. 20 | 3. Ensure any install or build dependencies are removed before the end of the layer when doing a 21 | build and creating a pull request. 22 | 4. Add comments with details of changes to the interface; this includes new environment 23 | variables, exposed ports, useful file locations, and container parameters. 24 | 5. You may merge the pull request once you have the sign-off of at least one other developer, or if you 25 | do not have permission to do that, you may ask the owner to merge it for you once you believe all checks have passed. 26 | 27 | ============ 28 | Final Note 29 | ============ 30 | 31 | We look forward to your feedback. Please help us improve this open source project and make our work better. 32 | To contribute, please create a pull request, and we will review it promptly. Once again, we appreciate 33 | your feedback and thorough code inspections. 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Amirsina Torfi 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /_img/0-welcome/graph-run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/0-welcome/graph-run.png -------------------------------------------------------------------------------- /_img/1-basics/basic_math_operations/graph-run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/1-basics/basic_math_operations/graph-run.png -------------------------------------------------------------------------------- /_img/1-basics/readme.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | Basics 3 | ============================== 4 | 5 | 6 | -------------------------------------------------------------------------------- /_img/2-basics_in_machine_learning/linear_regression/updating_model.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/2-basics_in_machine_learning/linear_regression/updating_model.gif -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/accuracy_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/accuracy_train.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/activation_fc4_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/activation_fc4_train.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/architecture.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/classifier_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/classifier_image.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/convlayer.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/convlayer.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/graph.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/histogram_fc4_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/histogram_fc4_train.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/loss_accuracy_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/loss_accuracy_train.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/loss_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/loss_train.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/terminal_training.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/terminal_training.png -------------------------------------------------------------------------------- /_img/3-neural_network/convolutiona_neural_network/test_accuracy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/convolutiona_neural_network/test_accuracy.png -------------------------------------------------------------------------------- /_img/3-neural_network/multi-layer-perceptron/neural-network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/3-neural_network/multi-layer-perceptron/neural-network.png -------------------------------------------------------------------------------- /_img/mainpage/CNNs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/CNNs.png -------------------------------------------------------------------------------- 
/_img/mainpage/TensorFlow_World.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/TensorFlow_World.gif -------------------------------------------------------------------------------- /_img/mainpage/Tensor_GIF.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/Tensor_GIF.gif -------------------------------------------------------------------------------- /_img/mainpage/Tensor_GIF_ff.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/Tensor_GIF_ff.gif -------------------------------------------------------------------------------- /_img/mainpage/basicmodels.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/basicmodels.gif -------------------------------------------------------------------------------- /_img/mainpage/basicmodels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/basicmodels.png -------------------------------------------------------------------------------- /_img/mainpage/basics-old.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/basics-old.png -------------------------------------------------------------------------------- /_img/mainpage/basics.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/basics.gif -------------------------------------------------------------------------------- /_img/mainpage/installation-logo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/installation-logo.gif -------------------------------------------------------------------------------- /_img/mainpage/installation.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/installation.gif -------------------------------------------------------------------------------- /_img/mainpage/welcome.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/welcome.gif -------------------------------------------------------------------------------- /_img/mainpage/welcome.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/_img/mainpage/welcome.png -------------------------------------------------------------------------------- /codes/ipython/0-welcome/README.rst: -------------------------------------------------------------------------------- 1 | 2 | =========================== 3 | Welcome to TensorFlow World 4 | =========================== 5 | 6 | This document explains how to run the Python script for this tutorial. 7 | 8 | --------------------------- 9 | Test TensorFlow Environment 10 | --------------------------- 11 | 12 | ``WARNING:`` If TensorFlow is installed in an environment (e.g., a virtual environment), that environment must be activated first. So, before anything else, make sure TensorFlow is available in the current environment by running the following script: 13 | 14 | .. code:: shell 15 | 16 | cd code/ 17 | python TensorFlow_Test.py 18 | 19 | -------------------------------- 20 | How to run the code in Terminal? 21 | -------------------------------- 22 | 23 | 24 | Please navigate to the ``code/`` directory and run the Python script using the general form below: 25 | 26 | .. code:: shell 27 | 28 | python [python_code_file.py] --log_dir='absolute/path/to/log_dir' 29 | 30 | 31 | As an example, the code can be executed as follows: 32 | 33 | .. code:: shell 34 | 35 | python 0-welcome.py --log_dir='~/log_dir' 36 | 37 | The ``--log_dir`` flag specifies the directory where the event files (for visualization in TensorBoard) will be saved. The ``--log_dir`` flag is optional because its default value is provided in the source code as follows: 38 | 39 | .. code:: python 40 | 41 | tf.app.flags.DEFINE_string( 42 | 'log_dir', os.path.dirname(os.path.abspath(__file__)) + '/logs', 43 | 'Directory where event logs are written to.') 44 | 45 | ---------------------------- 46 | How to run the code in IDEs? 47 | ---------------------------- 48 | 49 | Since the code is ready to go, it can be executed successfully in any IDE (PyCharm, Spyder, ...) in which TensorFlow can be imported. 50 | 51 | 52 | ---------------------------- 53 | How to run TensorBoard? 54 | ---------------------------- 55 | .. _Google’s words: https://www.tensorflow.org/get_started/summaries_and_tensorboard 56 | TensorBoard is the graph visualization tool provided by TensorFlow. In `Google’s words`_: “The computations you'll use TensorFlow for - like training a massive deep neural network - can be complex and confusing. To make it easier to understand, 57 | debug, and optimize TensorFlow programs, we've included a suite of visualization tools called 58 | TensorBoard.” 59 | 60 | TensorBoard can be run from the terminal as follows: 61 | 62 | ..
code:: shell 63 | 64 | tensorboard --logdir="absolute/path/to/log_dir" 65 | 66 | 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /codes/ipython/0-welcome/code/0-welcome.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "#####################################################\n", 12 | "########## Welcome to TensorFlow World ##############\n", 13 | "#####################################################\n", 14 | "\n", 15 | "# The tutorials in this section is just a start for going into TensorFlow world.\n", 16 | "# The TensorFlow flags are used for having a more user friendly environment.\n", 17 | "\n", 18 | "from __future__ import print_function\n", 19 | "import tensorflow as tf\n", 20 | "import os" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 3, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "# Defining some sentence!\n", 30 | "welcome = tf.constant('Welcome to TensorFlow world!')" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 4, 36 | "metadata": {}, 37 | "outputs": [ 38 | { 39 | "name": "stdout", 40 | "output_type": "stream", 41 | "text": [ 42 | "output: Welcome to TensorFlow world!\n" 43 | ] 44 | } 45 | ], 46 | "source": [ 47 | "# Run the session\n", 48 | "with tf.Session() as sess:\n", 49 | " print(\"output: \", sess.run(welcome))" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 6, 55 | "metadata": {}, 56 | "outputs": [], 57 | "source": [ 58 | "# Closing the session.\n", 59 | "sess.close()" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": null, 65 | "metadata": { 66 | "collapsed": true 67 | }, 68 | "outputs": [], 69 | "source": [] 70 | } 71 | ], 72 | "metadata": { 73 | "kernelspec": { 74 | "display_name": "Python 2", 75 | "language": "python", 76 | "name": "python2" 77 | }, 78 | "language_info": { 79 | "codemirror_mode": { 80 | "name": "ipython", 81 | "version": 2 82 | }, 83 | "file_extension": ".py", 84 | "mimetype": "text/x-python", 85 | "name": "python", 86 | "nbconvert_exporter": "python", 87 | "pygments_lexer": "ipython2", 88 | "version": "2.7.12" 89 | } 90 | }, 91 | "nbformat": 4, 92 | "nbformat_minor": 2 93 | } 94 | -------------------------------------------------------------------------------- /codes/ipython/1-basics/basic_math_operations/README.rst: -------------------------------------------------------------------------------- 1 | 2 | =========================== 3 | Welcome to TensorFlow World 4 | =========================== 5 | 6 | This document is dedicated to explain how to run the python script for this tutorial. 7 | 8 | 9 | -------------------------------- 10 | How to run the code in Terminal? 11 | -------------------------------- 12 | 13 | 14 | Please root to the ``code/`` directory and run the python script as the general form of below: 15 | 16 | .. code:: shell 17 | 18 | python [python_code_file.py] --log_dir='absolute/path/to/log_dir' 19 | 20 | 21 | As an example the code can be executed as follows: 22 | 23 | .. code:: shell 24 | 25 | python 1-welcome.py --log_dir='~/log_dir' 26 | 27 | The ``--log_dir`` flag is to provide the address which the event files (for visualizing in Tensorboard) will be saved. 
The flag of ``--log_dir`` is not required because its default value is available in the source code as follows: 28 | 29 | .. code:: python 30 | 31 | tf.app.flags.DEFINE_string( 32 | 'log_dir', os.path.dirname(os.path.abspath(__file__)) + '/logs', 33 | 'Directory where event logs are written to.') 34 | 35 | ---------------------------- 36 | How to run the code in IDEs? 37 | ---------------------------- 38 | 39 | Since the code is ready-to-go, as long as the TensorFlow can be called in the IDE editor(Pycharm, Spyder,..), the code can be executed successfully. 40 | 41 | 42 | ---------------------------- 43 | How to run the Tensorboard? 44 | ---------------------------- 45 | .. _Google’s words: https://www.tensorflow.org/get_started/summaries_and_tensorboard 46 | TensorBoard is the graph visualization tools provided by TensorFlow. Using `Google’s words`_: “The computations you'll use TensorFlow for - like training a massive deep neural network - can be complex and confusing. To make it easier to understand, 47 | debug, and optimize TensorFlow programs, we've included a suite of visualization tools called 48 | TensorBoard.” 49 | 50 | The Tensorboard can be run as follows in the terminal: 51 | 52 | .. code:: shell 53 | 54 | tensorboard --logdir="absolute/path/to/log_dir" 55 | 56 | 57 | 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /codes/ipython/1-basics/basic_math_operations/code/basic_math_operation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "#####################################################\n", 12 | "########## Welcome to TensorFlow World ##############\n", 13 | "#####################################################\n", 14 | "\n", 15 | "# The tutorials in this section is just a start for math operations.\n", 16 | "# The TensorFlow flags are used for having a more user friendly environment.\n", 17 | "\n", 18 | "from __future__ import print_function\n", 19 | "import tensorflow as tf\n", 20 | "import os" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 2, 26 | "metadata": { 27 | "collapsed": true 28 | }, 29 | "outputs": [], 30 | "source": [ 31 | "# Defining some constant values\n", 32 | "a = tf.constant(5.0, name=\"a\")\n", 33 | "b = tf.constant(10.0, name=\"b\")" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 3, 39 | "metadata": { 40 | "collapsed": true 41 | }, 42 | "outputs": [], 43 | "source": [ 44 | "# Some basic operations\n", 45 | "x = tf.add(a, b, name=\"add\")\n", 46 | "y = tf.div(a, b, name=\"divide\")" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 4, 52 | "metadata": { 53 | "collapsed": false 54 | }, 55 | "outputs": [ 56 | { 57 | "name": "stdout", 58 | "output_type": "stream", 59 | "text": [ 60 | "a = 5.0\n", 61 | "b = 10.0\n", 62 | "a + b = 15.0\n", 63 | "a/b = 0.5\n" 64 | ] 65 | } 66 | ], 67 | "source": [ 68 | "# Run the session\n", 69 | "with tf.Session() as sess:\n", 70 | " print(\"a =\", sess.run(a))\n", 71 | " print(\"b =\", sess.run(b))\n", 72 | " print(\"a + b =\", sess.run(x))\n", 73 | " print(\"a/b =\", sess.run(y))" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 5, 79 | "metadata": { 80 | "collapsed": true 81 | }, 82 | "outputs": [], 83 | "source": [ 84 | "# Closing the session.\n", 85 | "sess.close()" 86 | ] 87 
| }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": { 92 | "collapsed": true 93 | }, 94 | "outputs": [], 95 | "source": [] 96 | } 97 | ], 98 | "metadata": { 99 | "kernelspec": { 100 | "display_name": "Python 2", 101 | "language": "python", 102 | "name": "python2" 103 | }, 104 | "language_info": { 105 | "codemirror_mode": { 106 | "name": "ipython", 107 | "version": 2 108 | }, 109 | "file_extension": ".py", 110 | "mimetype": "text/x-python", 111 | "name": "python", 112 | "nbconvert_exporter": "python", 113 | "pygments_lexer": "ipython2", 114 | "version": "2.7.13" 115 | } 116 | }, 117 | "nbformat": 4, 118 | "nbformat_minor": 2 119 | } 120 | -------------------------------------------------------------------------------- /codes/ipython/1-basics/readme.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | Basics 3 | ============================== 4 | 5 | 6 | -------------------------------------------------------------------------------- /codes/ipython/1-basics/variables/README.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | Variables 3 | ========== 4 | 5 | This source code demonstrates how to define and initialize variables. 6 | 7 | 8 | -------------------------------- 9 | How to run the code in Terminal? 10 | -------------------------------- 11 | 12 | 13 | Please navigate to the ``code/`` directory and run the Python script using the general form below: 14 | 15 | .. code:: shell 16 | 17 | python [python_code_file.py] 18 | 19 | 20 | As an example, the code can be executed as follows: 21 | 22 | .. code:: shell 23 | 24 | python variables.py 25 | 26 | ---------------------------- 27 | How to run the code in IDEs? 28 | ---------------------------- 29 | 30 | Since the code is ready to go, it can be executed successfully in any IDE (PyCharm, Spyder, ...) in which TensorFlow can be imported.
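The notebook above walks through several initialization patterns. As a compact recap, the sketch below collects them in one runnable script (TensorFlow 1.x API, matching the rest of this repository; the variable names are illustrative, not the exact tutorial code):

.. code:: python

    import tensorflow as tf

    # Define variables with default initial values.
    weights = tf.Variable(tf.random_normal([2, 3], stddev=0.1), name="weights")
    biases = tf.Variable(tf.zeros([3]), name="biases")

    # Initialize only a chosen subset of variables...
    init_custom_op = tf.variables_initializer(var_list=[weights])

    # ...or add an op that initializes all variables at once.
    init_all_op = tf.global_variables_initializer()

    # Initialize a new variable from another variable's initial value.
    weights_new = tf.Variable(weights.initialized_value(), name="weights_new")
    init_new_op = tf.variables_initializer(var_list=[weights_new])

    with tf.Session() as sess:
        # Run the initializer operations before using any variable.
        sess.run(init_all_op)
        sess.run(init_custom_op)
        sess.run(init_new_op)
        print(sess.run(weights_new))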
31 | 32 | 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /codes/ipython/1-basics/variables/code/variables.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 6, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "## This code create some arbitrary variables and initialize them ###\n", 12 | "# The goal is to show how to define and initialize variables from scratch.\n", 13 | "\n", 14 | "import tensorflow as tf\n", 15 | "from tensorflow.python.framework import ops" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 7, 21 | "metadata": { 22 | "collapsed": true 23 | }, 24 | "outputs": [], 25 | "source": [ 26 | "#######################################\n", 27 | "######## Defining Variables ###########\n", 28 | "#######################################\n", 29 | "\n", 30 | "# Create three variables with some default values.\n", 31 | "weights = tf.Variable(tf.random_normal([2, 3], stddev=0.1),\n", 32 | " name=\"weights\")\n", 33 | "biases = tf.Variable(tf.zeros([3]), name=\"biases\")\n", 34 | "custom_variable = tf.Variable(tf.zeros([3]), name=\"custom\")\n", 35 | "\n", 36 | "# Get all the variables' tensors and store them in a list.\n", 37 | "all_variables_list = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 8, 43 | "metadata": { 44 | "collapsed": true 45 | }, 46 | "outputs": [], 47 | "source": [ 48 | "############################################\n", 49 | "######## Customized initializer ############\n", 50 | "############################################\n", 51 | "\n", 52 | "## Initialation of some custom variables.\n", 53 | "## In this part we choose some variables and only initialize them rather than initializing all variables.\n", 54 | "\n", 55 | "# \"variable_list_custom\" is the list of variables that we want to initialize.\n", 56 | "variable_list_custom = [weights, custom_variable]\n", 57 | "\n", 58 | "# The initializer\n", 59 | "init_custom_op = tf.variables_initializer(var_list=variable_list_custom )" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 9, 65 | "metadata": { 66 | "collapsed": true 67 | }, 68 | "outputs": [], 69 | "source": [ 70 | "########################################\n", 71 | "######## Global initializer ############\n", 72 | "########################################\n", 73 | "\n", 74 | "# Method-1\n", 75 | "# Add an op to initialize the variables.\n", 76 | "init_all_op = tf.global_variables_initializer()\n", 77 | "\n", 78 | "# Method-2\n", 79 | "init_all_op = tf.variables_initializer(var_list=all_variables_list)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 10, 85 | "metadata": { 86 | "collapsed": true 87 | }, 88 | "outputs": [], 89 | "source": [ 90 | "##########################################################\n", 91 | "######## Initialization using other variables ############\n", 92 | "##########################################################\n", 93 | "\n", 94 | "# Create another variable with the same value as 'weights'.\n", 95 | "WeightsNew = tf.Variable(weights.initialized_value(), name=\"WeightsNew\")\n", 96 | "\n", 97 | "# Now, the variable must be initialized.\n", 98 | "init_WeightsNew_op = tf.variables_initializer(var_list=[WeightsNew])" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": 11, 104 
| "metadata": { 105 | "collapsed": true 106 | }, 107 | "outputs": [], 108 | "source": [ 109 | "######################################\n", 110 | "####### Running the session ##########\n", 111 | "######################################\n", 112 | "with tf.Session() as sess:\n", 113 | " # Run the initializer operation.\n", 114 | " sess.run(init_all_op)\n", 115 | " sess.run(init_custom_op)\n", 116 | " sess.run(init_WeightsNew_op)" 117 | ] 118 | } 119 | ], 120 | "metadata": { 121 | "kernelspec": { 122 | "display_name": "Python 2", 123 | "language": "python", 124 | "name": "python2" 125 | }, 126 | "language_info": { 127 | "codemirror_mode": { 128 | "name": "ipython", 129 | "version": 2 130 | }, 131 | "file_extension": ".py", 132 | "mimetype": "text/x-python", 133 | "name": "python", 134 | "nbconvert_exporter": "python", 135 | "pygments_lexer": "ipython2", 136 | "version": "2.7.13" 137 | } 138 | }, 139 | "nbformat": 4, 140 | "nbformat_minor": 2 141 | } 142 | -------------------------------------------------------------------------------- /codes/ipython/2-basics_in_machine_learning/linear_regression/README.rst: -------------------------------------------------------------------------------- 1 | ================== 2 | Linear Regression 3 | ================== 4 | 5 | This document is dedicated to explain how to run the python script for this tutorial. The documentation is available `here `_. Alternatively, you can check this ``Linear Regression using TensorFlow`` `blog post `_ for further details. 6 | 7 | .. _blogpostlinearregression: http://www.machinelearninguru.com/deep_learning/tensorflow/machine_learning_basics/linear_regresstion/linear_regression.html 8 | 9 | .. _Documentationlinearregression: https://github.com/astorfi/TensorFlow-World/wiki/Linear-Regeression 10 | 11 | ------------------- 12 | Python Environment 13 | ------------------- 14 | 15 | ``WARNING:`` If TensorFlow is installed in any environment(virtual environment, ...), it must be activated at first. So at first make sure the tensorFlow is available in the current environment using the following script: 16 | 17 | -------------------------------- 18 | How to run the code in Terminal? 19 | -------------------------------- 20 | 21 | 22 | Please root to the ``code/`` directory and run the python script as the general form of below: 23 | 24 | .. code:: shell 25 | 26 | python [python_code_file.py] 27 | 28 | 29 | As an example the code can be executed as follows: 30 | 31 | .. code:: shell 32 | 33 | python linear_regression.py --num_epochs=50 34 | 35 | The ``--num_epochs`` flag is to provide the number of epochs that will be used for training. The ``--num_epochs`` flag is not required because its default value is ``50`` and is provided in the source code as follows: 36 | 37 | .. code:: python 38 | 39 | tf.app.flags.DEFINE_integer( 40 | 'num_epochs', 50, 'The number of epochs for training the model. Default=50') 41 | 42 | ---------------------------- 43 | How to run the code in IDEs? 44 | ---------------------------- 45 | 46 | Since the code is ready-to-go, as long as the TensorFlow can be called in the IDE editor(Pycharm, Spyder,..), the code can be executed successfully. 
47 | -------------------------------------------------------------------------------- /codes/ipython/2-basics_in_machine_learning/linear_regression/updating_model.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/2-basics_in_machine_learning/linear_regression/updating_model.gif -------------------------------------------------------------------------------- /codes/ipython/2-basics_in_machine_learning/linear_svm/README.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | Linear SVM 3 | =========== 4 | 5 | This document explains how to run the Python script for this tutorial. In this tutorial, we create a linear SVM to separate the data. The data used for this code is linearly separable. 6 | 7 | ------------------- 8 | Python Environment 9 | ------------------- 10 | 11 | ``WARNING:`` If TensorFlow is installed in an environment (e.g., a virtual environment), that environment must be activated first. So, before anything else, make sure TensorFlow is available in the current environment. 12 | 13 | -------------------------------- 14 | How to run the code in Terminal? 15 | -------------------------------- 16 | 17 | 18 | Please navigate to the ``code/`` directory and run the Python script using the general form below: 19 | 20 | .. code:: shell 21 | 22 | python [python_code_file.py] 23 | 24 | 25 | As an example, the code can be executed as follows: 26 | 27 | .. code:: shell 28 | 29 | python linear_svm.py 30 | 31 | ---------------------------- 32 | How to run the code in IDEs? 33 | ---------------------------- 34 | 35 | Since the code is ready to go, it can be executed successfully in any IDE (PyCharm, Spyder, ...) in which TensorFlow can be imported. 36 | -------------------------------------------------------------------------------- /codes/ipython/2-basics_in_machine_learning/logistic_regression/README.rst: -------------------------------------------------------------------------------- 1 | =================== 2 | Logistic Regression 3 | =================== 4 | 5 | This document explains how to run the Python script for this tutorial. ``Logistic regression`` is a binary 6 | classification algorithm in which `yes` or `no` are the only possible responses. The linear output is transformed into a probability between zero and one, and the decision is made by thresholding that probability to pick a class. We minimize a ``softmax`` ``cross-entropy`` loss, as sketched below.
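As a rough sketch of the idea just described (TensorFlow 1.x; the shapes and names are illustrative, not the exact implementation in ``code/logistic_regression.py``):

.. code:: python

    import tensorflow as tf

    num_features, num_classes = 784, 2

    # Placeholders for a batch of samples and their integer labels.
    x = tf.placeholder(tf.float32, shape=[None, num_features])
    y = tf.placeholder(tf.int32, shape=[None])

    # A single linear layer produces the logits.
    W = tf.Variable(tf.zeros([num_features, num_classes]))
    b = tf.Variable(tf.zeros([num_classes]))
    logits = tf.matmul(x, W) + b

    # Softmax cross-entropy turns the linear output into a probability-based loss.
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
    train_op = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    # The predicted class is the arg-max over the class probabilities.
    prediction = tf.argmax(logits, axis=1)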
7 | 8 | ------------------- 9 | Python Environment 10 | ------------------- 11 | 12 | ``WARNING:`` If TensorFlow is installed in an environment (e.g., a virtual environment), that environment must be activated first. So, before anything else, make sure TensorFlow is available in the current environment. 13 | 14 | -------------------------------- 15 | How to run the code in Terminal? 16 | -------------------------------- 17 | 18 | Please navigate to the ``code/`` directory and run the Python script using the general form below: 19 | 20 | .. code:: shell 21 | 22 | python [python_code_file.py] 23 | 24 | 25 | As an example, the code can be executed as follows: 26 | 27 | .. code:: shell 28 | 29 | python logistic_regression.py --num_epochs=50 --batch_size=512 --max_num_checkpoint=10 --num_classes=2 30 | 31 | Several ``flags`` are provided for training; for the full list, please refer to the source code. The command above is only one example of how they can be combined. 32 | 33 | ---------------------------- 34 | How to run the code in IDEs? 35 | ---------------------------- 36 | 37 | Since the code is ready to go, it can be executed successfully in any IDE (PyCharm, Spyder, ...) in which TensorFlow can be imported. 38 | -------------------------------------------------------------------------------- /codes/ipython/2-basics_in_machine_learning/multiclass_svm/README.rst: -------------------------------------------------------------------------------- 1 | ======================= 2 | Multi-Class Kernel SVM 3 | ======================= 4 | 5 | This document explains how to run the Python script for this tutorial. In this tutorial, we create a kernel SVM to separate the data; the data used for this code is the MNIST dataset. This document is inspired by the `Implementing Multiclass SVMs <Multiclasssvm_>`_ open source code; however, ours extends it to the MNIST dataset and modifies the method. 6 | 7 | .. _Multiclasssvm: https://github.com/nfmcclure/tensorflow_cookbook/tree/master/04_Support_Vector_Machines/06_Implementing_Multiclass_SVMs 8 | 9 | 10 | ------------------- 11 | Python Environment 12 | ------------------- 13 | 14 | ``WARNING:`` If TensorFlow is installed in an environment (e.g., a virtual environment), that environment must be activated first. So, before anything else, make sure TensorFlow is available in the current environment. 15 | 16 | -------------------------------- 17 | How to run the code in Terminal? 18 | -------------------------------- 19 | 20 | 21 | Please navigate to the ``code/`` directory and run the Python script using the general form below: 22 | 23 | .. code:: shell 24 | 25 | python [python_code_file.py] 26 | 27 | 28 | As an example, the code can be executed as follows: 29 | 30 | .. code:: shell 31 | 32 | python multiclass_svm.py 33 | 34 | ---------------------------- 35 | How to run the code in IDEs? 36 | ---------------------------- 37 | 38 | Since the code is ready to go, it can be executed successfully in any IDE (PyCharm, Spyder, ...) in which TensorFlow can be imported.
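The heart of a kernel SVM is the kernel matrix. Following the referenced cookbook's approach, a Gaussian (RBF) kernel over a batch of data can be written as below; this is a hedged sketch in TensorFlow 1.x, not the exact code of this tutorial (``gamma`` and the shapes are illustrative):

.. code:: python

    import tensorflow as tf

    x_data = tf.placeholder(tf.float32, shape=[None, 784])  # e.g., MNIST feature vectors
    gamma = tf.constant(-50.0)

    # Pairwise squared distances via ||a||^2 - 2 a.b + ||b||^2.
    sq_norms = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
    sq_dists = tf.add(
        tf.subtract(sq_norms, tf.multiply(2.0, tf.matmul(x_data, tf.transpose(x_data)))),
        tf.transpose(sq_norms))

    # The RBF kernel matrix; entry (i, j) measures the similarity of samples i and j.
    rbf_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))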
39 | -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/3-neural_networks/convolutional-neural-network/code/__init__.py -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/auxiliary/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/3-neural_networks/convolutional-neural-network/code/auxiliary/__init__.py -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/auxiliary/progress_bar.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | def print_progress(progress, epoch_num, loss): 5 | """ 6 | This function draws an active progress bar. 7 | :param progress: Where we are: 8 | type: float 9 | value: [0,1] 10 | :param epoch_num: number of epochs for training 11 | :param loss: The loss for the specific batch in the training phase. 12 | 13 | :return: None. The progress bar is written to stdout. 14 | """ 15 | 16 | # Define the length of the bar. 17 | barLength = 30 18 | 19 | # Check the input! 20 | assert type(progress) is float, "progress is not a float: %r" % progress 21 | assert 0 <= progress <= 1, "progress should be between zero and one!" 22 | 23 | # Empty status while processing. 24 | status = "" 25 | 26 | # This part makes a new line when the process is finished. 27 | if progress >= 1: 28 | progress = 1 29 | status = "\r\n" 30 | 31 | # Where we are in the progress! 32 | indicator = int(round(barLength*progress)) 33 | 34 | # Print the appropriate progress phase! 35 | args = [str(epoch_num), "#"*indicator, "-"*(barLength-indicator), progress*100, loss, status] 36 | text = "\rEpoch {0[0]} {0[1]} {0[2]} %{0[3]:.2f} loss={0[4]:.3f} {0[5]}".format(args) 37 | sys.stdout.write(text) 38 | sys.stdout.flush() 39 |
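For reference, a hedged usage sketch of ``print_progress`` inside a training loop (the import path assumes the ``code/`` directory is the working directory; the loss values are placeholders):

.. code:: python

    from auxiliary.progress_bar import print_progress

    num_batches = 100
    for epoch in range(1, 3):
        for batch in range(1, num_batches + 1):
            dummy_loss = 1.0 / batch  # placeholder for the real batch loss
            # progress must be a float in [0, 1]; reaching 1.0 prints the newline.
            print_progress(float(batch) / num_batches, epoch_num=epoch, loss=dummy_loss)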
-------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/auxiliary/progress_bar.py~: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | def print_progress(progress, epoch_num, loss): 5 | """ 6 | This function draws an active progress bar. 7 | :param progress: Where we are: 8 | type: float 9 | value: [0,1] 10 | :param epoch_num: number of epochs for training 11 | :param loss: The loss for the specific batch in the training phase. 12 | 13 | :return: None. The progress bar is written to stdout. 14 | """ 15 | 16 | # Define the length of the bar. 17 | barLength = 30 18 | 19 | # Check the input! 20 | assert type(progress) is float, "progress is not a float: %r" % progress 21 | assert 0 <= progress <= 1, "progress should be between zero and one!" 22 | 23 | # Empty status while processing. 24 | status = "" 25 | 26 | # This part makes a new line when the process is finished. 27 | if progress >= 1: 28 | progress = 1 29 | status = "\r\n" 30 | 31 | # Where we are in the progress! 32 | indicator = int(round(barLength*progress)) 33 | 34 | # Print the appropriate progress phase! 35 | args = [str(epoch_num), "#"*indicator, "-"*(barLength-indicator), progress*100, loss, status] 36 | text = "\rEpoch {0[0]} {0[1]} {0[2]} %{0[3]:.2f} loss={0[4]:.3f} {0[5]}".format(args) 37 | sys.stdout.write(text) 38 | sys.stdout.flush() 39 | -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/evaluation.sh: -------------------------------------------------------------------------------- 1 | 2 | # Run evaluation. 3 | python test_classifier.py \ 4 | --batch_size=512 \ 5 | --allow_soft_placement 6 | 7 | -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/input_function/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/3-neural_networks/convolutional-neural-network/code/input_function/__init__.py -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/input_function/input.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import collections 3 | 4 | 5 | class DATA_OBJECT(object): 6 | def __init__(self, 7 | images, 8 | labels, 9 | num_classes=0, 10 | one_hot=False, 11 | dtype=np.float32, 12 | reshape=False): 13 | """Data object construction. 14 | images: The images of size [num_samples, rows, columns, depth]. 15 | labels: The labels of size [num_samples,] 16 | num_classes: The number of classes in case one_hot labeling is desired. 17 | one_hot=False: Turn the labels into the one_hot format. 18 | dtype=np.float32: The data type. 19 | reshape=False: Reshape in case feature-vector extraction is desired. 20 | 21 | """ 22 | # Define the data type. 23 | if dtype not in (np.uint8, np.float32): 24 | raise TypeError('Invalid image dtype %r, expected uint8 or float32' % 25 | dtype) 26 | assert images.shape[0] == labels.shape[0], ( 27 | 'images.shape: %s labels.shape: %s' % (images.shape, labels.shape)) 28 | self._num_samples = images.shape[0] 29 | 30 | # [num_examples, rows, columns, depth] -> [num_examples, rows*columns] 31 | if reshape: 32 | assert images.shape[3] == 1 33 | images = images.reshape(images.shape[0], 34 | images.shape[1] * images.shape[2]) 35 | 36 | # Convert to float if necessary. 37 | if dtype == np.float32: 38 | # Convert from [0, 255] -> [0.0, 1.0]. 39 | images = images.astype(dtype) 40 | images = np.multiply(images, 1.0 / 255.0) 41 | self._images = images 42 | self._labels = labels 43 | 44 | # If the one_hot flag is true, then the one_hot labeling supersedes the normal labeling. 45 | if one_hot: 46 | # If one_hot labeling is desired, the number of classes must be defined as one of the arguments of the DATA_OBJECT class! 47 | assert num_classes != 0, ( 48 | 'You must specify the num_classes in the DATA_OBJECT for one_hot label construction!') 49 | 50 | # Define the indexes.
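# How the flat one-hot indexing below works (an illustrative note):
# one_hot_labels has shape [num_samples, num_classes], so in its flattened
# view, row i occupies positions [i*num_classes, (i+1)*num_classes). Writing a
# 1 at position i*num_classes + labels[i] therefore marks the correct class
# column of row i. For example, with num_classes=3 and labels=[2, 0], the flat
# indices are [0*3+2, 1*3+0] = [2, 3], giving rows [0, 0, 1] and [1, 0, 0].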
51 | index = np.arange(self._num_samples) * num_classes 52 | one_hot_labels = np.zeros((self._num_samples, num_classes)) 53 | one_hot_labels.flat[index + labels.ravel()] = 1 54 | self._labels = one_hot_labels 55 | 56 | @property 57 | def images(self): 58 | return self._images 59 | 60 | @property 61 | def labels(self): 62 | return self._labels 63 | 64 | @property 65 | def num_samples(self): 66 | return self._num_samples 67 | 68 | 69 | def provide_data(mnist): 70 | """ 71 | This function provides a data object with the desired shape. 72 | The attributes of the data object: 73 | - train 74 | - validation 75 | - test 76 | The sub-attributes of each of those attributes: 77 | - images 78 | - labels 79 | 80 | :param mnist: The downloaded MNIST dataset 81 | :return: data: The data object. 82 | ex: data.train.images returns the images of the training set. 83 | 84 | 85 | """ 86 | ################################################ 87 | ########## Get the images and labels############ 88 | ################################################ 89 | 90 | # Note: This setup is specific to MNIST data but can be generalized for any data. 91 | # The ?_images (? can be train, validation or test) must have the format [num_samples, rows, columns, depth] after extraction from data. 92 | # The ?_labels (? can be train, validation or test) must have the format [num_samples,] after extraction from data. 93 | train_images = mnist.train.images 94 | train_labels = mnist.train.labels 95 | validation_images = mnist.validation.images 96 | validation_labels = mnist.validation.labels 97 | test_images = mnist.test.images 98 | test_labels = mnist.test.labels 99 | 100 | # Create separate objects for train, validation & test. 101 | train = DATA_OBJECT(train_images, train_labels, num_classes=10, one_hot=True, dtype=np.float32, reshape=False) 102 | validation = DATA_OBJECT(validation_images, validation_labels, num_classes=10, one_hot=True, dtype=np.float32, 103 | reshape=False) 104 | test = DATA_OBJECT(test_images, test_labels, num_classes=10, one_hot=True, dtype=np.float32, reshape=False) 105 | 106 | # Create the whole data object 107 | DataSetObject = collections.namedtuple('DataSetObject', ['train', 'validation', 'test']) 108 | data = DataSetObject(train=train, validation=validation, test=test) 109 | 110 | return data 111 | -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/net_structure/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/3-neural_networks/convolutional-neural-network/code/net_structure/__init__.py -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/net_structure/net.py: -------------------------------------------------------------------------------- 1 | ##################################### 2 | # With some tiny modifications, this code is the one used by TensorFlow-Slim at: 3 | # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim 4 | # Please refer to the link for further explanations. 5 | 6 | ### The difference is that this architecture is written in a fully-convolutional fashion. 7 | ### The advantage is that this model can be used for larger image sizes with some average pooling in the last layer.
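# Shape walk-through for the default MNIST-sized input (an inferred note,
# assuming 28x28x1 images): conv1 (SAME) + pool1 -> 14x14, conv2 (SAME) +
# pool2 -> 7x7, so the [7, 7] VALID convolution in fc3 collapses the spatial
# dimensions to 1x1, and the final [1, 1] convolution in fc4 then acts as a
# fully connected layer. The spatial_squeeze step removes those singleton
# spatial dimensions from the logits.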
8 | 9 | import tensorflow as tf 10 | 11 | slim = tf.contrib.slim 12 | 13 | 14 | def net_architecture(images, num_classes=10, is_training=False, 15 | dropout_keep_prob=0.5, 16 | spatial_squeeze=True, 17 | scope='Net'): 18 | """Creates a variant of the Net model. 19 | 20 | Args: 21 | images: The batch of `Tensors`: size [batch_size, height, width, channels]. 22 | num_classes: Total number of classes. 23 | is_training: Training/Validation. 24 | dropout_keep_prob: The percentage of activation values: Only active in training mode! 25 | scope: Variable_scope. 26 | 27 | Returns: 28 | logits: the pre-softmax activations of size [batch_size, `num_classes`] 29 | end_points: The dictionary for the layers outputs. 30 | """ 31 | 32 | # Create empty dictionary 33 | end_points = {} 34 | 35 | with tf.variable_scope(scope, 'Net', [images, num_classes]) as sc: 36 | end_points_collection = sc.name + '_end_points' 37 | 38 | # Collect outputs for conv2d and max_pool2d. 39 | with tf.contrib.framework.arg_scope([tf.contrib.layers.conv2d, tf.contrib.layers.max_pool2d], 40 | outputs_collections=end_points_collection): 41 | # Layer-1 42 | net = tf.contrib.layers.conv2d(images, 32, [5, 5], scope='conv1') 43 | net = tf.contrib.layers.max_pool2d(net, [2, 2], 2, scope='pool1') 44 | 45 | # Layer-2 46 | net = tf.contrib.layers.conv2d(net, 64, [5, 5], scope='conv2') 47 | net = tf.contrib.layers.max_pool2d(net, [2, 2], 2, scope='pool2') 48 | 49 | # Layer-3 50 | net = tf.contrib.layers.conv2d(net, 1024, [7, 7], padding='VALID', scope='fc3') 51 | net = tf.contrib.layers.dropout(net, dropout_keep_prob, is_training=is_training, 52 | scope='dropout3') 53 | 54 | # Last layer which is the logits for classes 55 | logits = tf.contrib.layers.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='fc4') 56 | 57 | # Return the collections as a dictionary 58 | end_points = slim.utils.convert_collection_to_dict(end_points_collection) 59 | 60 | # Squeeze spatially to eliminate extra dimensions.(embedding layer) 61 | if spatial_squeeze: 62 | logits = tf.squeeze(logits, [1, 2], name='fc4/squeezed') 63 | end_points[sc.name + '/fc4'] = logits 64 | return logits, end_points 65 | 66 | 67 | def net_arg_scope(weight_decay=0.0005, is_training=False): 68 | """Defines the default network argument scope. 69 | 70 | Args: 71 | weight_decay: The weight decay to use for regularizing the model. 72 | 73 | Returns: 74 | An `arg_scope` to use for the model. 75 | """ 76 | if is_training: 77 | with tf.contrib.framework.arg_scope( 78 | [tf.contrib.layers.conv2d], 79 | padding='SAME', 80 | weights_regularizer=slim.l2_regularizer(weight_decay), 81 | weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', 82 | uniform=False, seed=None, 83 | dtype=tf.float32), 84 | activation_fn=tf.nn.relu) as sc: 85 | return sc 86 | 87 | else: 88 | with tf.contrib.framework.arg_scope( 89 | [tf.contrib.layers.conv2d], 90 | padding='SAME', 91 | activation_fn=tf.nn.relu) as sc: 92 | return sc 93 | 94 | -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/convolutional-neural-network/code/script/evaluation.sh: -------------------------------------------------------------------------------- 1 | # Where the logs will be saved to. 2 | test_dir=/home/sina/GITHUB/Tensorflow-Turorials/NeuralNetworks/convolutional-neural-network/code/test_logs 3 | 4 | # Where the checkpoints is saved to. 
5 | checkpoint_dir=/home/sina/GITHUB/Tensorflow-Turorials/NeuralNetworks/convolutional-neural-network/code/checkpoints
6 | 
7 | 
8 | # Run evaluation.
9 | python test_classifier.py \
10 |     --test_dir=${test_dir} \
11 |     --checkpoint_dir=${checkpoint_dir} \
12 |     --batch_size=512 \
13 |     --num_epochs=2 \
14 |     --allow_soft_placement
15 | 
16 | 
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/convolutional-neural-network/code/script/train.sh:
--------------------------------------------------------------------------------
1 | # Where the logs will be saved to.
2 | train_dir=/home/sina/GITHUB/Tensorflow-Turorials/NeuralNetworks/convolutional-neural-network/code/train_logs
3 | 
4 | # Where the checkpoints are saved to.
5 | checkpoint_dir=/home/sina/GITHUB/Tensorflow-Turorials/NeuralNetworks/convolutional-neural-network/code/checkpoints
6 | 
7 | 
8 | # Run training.
9 | python train_classifier.py \
10 |     --train_dir=${train_dir} \
11 |     --checkpoint_dir=${checkpoint_dir} \
12 |     --batch_size=512 \
13 |     --num_epochs=20 \
14 |     --max_num_checkpoint=10 \
15 |     --is_training \
16 |     --allow_soft_placement
17 | 
18 | 
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/convolutional-neural-network/code/train.sh:
--------------------------------------------------------------------------------
1 | 
2 | # Run training.
3 | python train_classifier.py \
4 |     --batch_size=512 \
5 |     --max_num_checkpoint=10 \
6 |     --num_classes=10 \
7 |     --num_epochs=1 \
8 |     --initial_learning_rate=0.001 \
9 |     --num_epochs_per_decay=1 \
10 |     --is_training=True \
11 |     --allow_soft_placement=True \
12 |     --fine_tuning=False \
13 |     --online_test=True \
14 |     --log_device_placement=False
15 | 
16 | 
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/convolutional-neural-network/code/train_evaluation.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import tensorflow as tf
3 | import numpy as np
4 | from auxiliary import progress_bar
5 | import os
6 | import sys
7 | 
8 | 
9 | def train(**keywords):
10 |     """
11 |     This function runs the session in either training or evaluation mode.
12 |     NOTE: **keywords is used in order to make the code easily changeable.
13 |     WARNING: All of the **keywords arguments must be defined when calling this function.
14 |     **keywords:
15 |     :param sess: The default session.
16 |     :param saver: The saver operator to save and load the model weights.
17 |     :param tensors: The tensors dictionary defined by the graph.
18 |     :param data: The data structure.
19 |     :param train_dir: The training directory, which is a reference for saving the logs and model checkpoints.
20 |     :param finetuning: Whether fine-tuning should be done, or random initialization is needed.
21 |     :param num_epochs: Number of epochs for training.
22 |     :param online_test: Whether the testing is done while training.
23 |     :param checkpoint_dir: The directory of the checkpoints.
24 |     :param batch_size: The training batch size.
25 | 
26 |     :return:
27 |         Runs the session.
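        Example call (a sketch; the sess/saver/tensors/data objects are
        assumed to be built by the caller, e.g. in train_classifier.py):
            train(sess=sess, saver=saver, tensors=tensors, data=data,
                  train_dir='train_logs', finetuning=False, num_epochs=20,
                  online_test=True, checkpoint_dir='checkpoints', batch_size=512)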
28 | """ 29 | 30 | # The prefix for checkpoint files 31 | checkpoint_prefix = 'model' 32 | 33 | ################################################################### 34 | ########## Defining the summary writers for train/test ########### 35 | ################################################################### 36 | 37 | train_summary_dir = os.path.join(keywords['train_dir'], "summaries", "train") 38 | train_summary_writer = tf.summary.FileWriter(train_summary_dir) 39 | train_summary_writer.add_graph(keywords['sess'].graph) 40 | 41 | test_summary_dir = os.path.join(keywords['train_dir'], "summaries", "test") 42 | test_summary_writer = tf.summary.FileWriter(test_summary_dir) 43 | test_summary_writer.add_graph(keywords['sess'].graph) 44 | 45 | # If fie-tuning flag in 'True' the model will be restored. 46 | if keywords['finetuning']: 47 | keywords['saver'].restore(keywords['sess'], os.path.join(keywords['checkpoint_dir'], checkpoint_prefix)) 48 | print("Model restored for fine-tuning...") 49 | 50 | ################################################################### 51 | ########## Run the training and loop over the batches ############# 52 | ################################################################### 53 | for epoch in range(keywords['num_epochs']): 54 | total_batch_training = int(keywords['data'].train.images.shape[0] / keywords['batch_size']) 55 | 56 | # go through the batches 57 | for batch_num in range(total_batch_training): 58 | ################################################# 59 | ########## Get the training batches ############# 60 | ################################################# 61 | 62 | start_idx = batch_num * keywords['batch_size'] 63 | end_idx = (batch_num + 1) * keywords['batch_size'] 64 | 65 | # Fit training using batch data 66 | train_batch_data, train_batch_label = keywords['data'].train.images[start_idx:end_idx], keywords[ 67 | 'data'].train.labels[ 68 | start_idx:end_idx] 69 | 70 | ######################################## 71 | ########## Run the session ############# 72 | ######################################## 73 | 74 | # Run optimization op (backprop) and Calculate batch loss and accuracy 75 | # When the tensor tensors['global_step'] is evaluated, it will be incremented by one. 76 | batch_loss, _, train_summaries, training_step = keywords['sess'].run( 77 | [keywords['tensors']['cost'], keywords['tensors']['train_op'], keywords['tensors']['summary_train_op'], 78 | keywords['tensors']['global_step']], 79 | feed_dict={keywords['tensors']['image_place']: train_batch_data, 80 | keywords['tensors']['label_place']: train_batch_label, 81 | keywords['tensors']['dropout_param']: 0.5}) 82 | 83 | ######################################## 84 | ########## Write summaries ############# 85 | ######################################## 86 | 87 | # Write the summaries 88 | train_summary_writer.add_summary(train_summaries, global_step=training_step) 89 | 90 | # # Write the specific summaries for training phase. 
91 |             # train_summary_writer.add_summary(train_image_summary, global_step=training_step)
92 | 
93 |             #################################################
94 |             ########## Print the progress bar ###############
95 |             #################################################
96 | 
97 |             progress = float(batch_num + 1) / total_batch_training
98 |             progress_bar.print_progress(progress, epoch_num=epoch + 1, loss=batch_loss)
99 | 
100 |         # ################################################################
101 |         # ############ Summaries per epoch of training ###################
102 |         # ################################################################
103 |         summary_epoch_train_op = keywords['tensors']['summary_epoch_train_op']
104 |         train_epoch_summaries = keywords['sess'].run(summary_epoch_train_op,
105 |                                                      feed_dict={keywords['tensors']['image_place']: train_batch_data,
106 |                                                                 keywords['tensors']['label_place']: train_batch_label,
107 |                                                                 keywords['tensors']['dropout_param']: 1.0})
108 | 
109 |         # Put the summaries to the train summary writer.
110 |         train_summary_writer.add_summary(train_epoch_summaries, global_step=training_step)
111 | 
112 |         #####################################################
113 |         ########## Evaluation on the test data ##############
114 |         #####################################################
115 | 
116 |         if keywords['online_test']:
117 |             # WARNING: In this evaluation the whole test data is fed. In case the test data is huge, this implementation
118 |             #          may lead to a memory error. In the presence of large testing samples, batch evaluation on the test set
119 |             #          is recommended, as in the training phase.
120 |             test_accuracy_epoch, test_summaries = keywords['sess'].run(
121 |                 [keywords['tensors']['accuracy'], keywords['tensors']['summary_test_op']],
122 |                 feed_dict={keywords['tensors']['image_place']: keywords['data'].test.images,
123 |                            keywords['tensors']['label_place']: keywords['data'].test.labels,
124 |                            keywords['tensors']['dropout_param']: 1.})
125 | 
126 | 
127 |             print("Epoch " + str(epoch + 1) + ", Testing Accuracy= " + \
128 |                   "{:.5f}".format(test_accuracy_epoch))
129 | 
130 |             ###########################################################
131 |             ########## Write the summaries for test phase #############
132 |             ###########################################################
133 | 
134 |             # Returning the value of global_step if necessary.
135 |             current_step = tf.train.global_step(keywords['sess'], keywords['tensors']['global_step'])
136 | 
137 |             # Add the counter of the global step for proper scaling between train and test summaries.
138 |             test_summary_writer.add_summary(test_summaries, global_step=current_step)
139 | 
140 |     ###########################################################
141 |     ############ Saving the model checkpoint ##################
142 |     ###########################################################
143 | 
144 |     # The model will be saved when the training is done.
145 | 
146 |     # Create the path for saving the checkpoints.
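    # (Added note: on Python >= 3.2 the two lines below could be replaced by
    # os.makedirs(keywords['checkpoint_dir'], exist_ok=True).)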
147 |     if not os.path.exists(keywords['checkpoint_dir']):
148 |         os.makedirs(keywords['checkpoint_dir'])
149 | 
150 |     # Save the model.
151 |     save_path = keywords['saver'].save(keywords['sess'], os.path.join(keywords['checkpoint_dir'], checkpoint_prefix))
152 |     print("Model saved in file: %s" % save_path)
153 | 
154 | 
155 | ############################################################################
156 | ########## Run the session for pure evaluation on the test data ############
157 | ############################################################################
158 | 
159 | 
160 | def evaluation(**keywords):
161 |     # The prefix for checkpoint files.
162 |     checkpoint_prefix = 'model'
163 | 
164 |     # Get the input arguments.
165 |     saver = keywords['saver']
166 |     sess = keywords['sess']
167 |     checkpoint_dir = keywords['checkpoint_dir']
168 |     data = keywords['data']
169 |     accuracy_tensor = keywords['tensors']['accuracy']
170 |     image_place = keywords['tensors']['image_place']
171 |     label_place = keywords['tensors']['label_place']
172 |     dropout_param = keywords['tensors']['dropout_param']
173 | 
174 | 
175 |     # Restoring the saved weights.
176 |     saver.restore(sess, os.path.join(checkpoint_dir, checkpoint_prefix))
177 |     print("Model restored...")
178 | 
179 |     test_set = data.test.images
180 |     test_label = data.test.labels
181 |     # Evaluation of the model.
182 |     test_accuracy = 100 * sess.run(accuracy_tensor, feed_dict={
183 |         image_place: test_set,
184 |         label_place: test_label,
185 |         dropout_param: 1.})
186 | 
187 |     print("Final Test Accuracy is %% %.2f" % test_accuracy)
188 | 
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/convolutional-neural-network/readme.rst:
--------------------------------------------------------------------------------
1 | ==============================
2 | Convolutional Neural Network
3 | ==============================
4 | 
5 | This is the code repository for the blog post `Train a Convolutional Neural Network as a Classifier`__. Please refer to this `wiki page`__ for more details.
6 | 
7 | .. _cnn_classifier_post: http://machinelearninguru.com/deep_learning/tensorflow/neural_networks/cnn_classifier/cnn_classifier.html
8 | __ cnn_classifier_post_
9 | 
10 | .. _cnn_classifier_wiki: https://github.com/astorfi/TensorFlow-World/wiki/Convolutional-Neural-Networks
11 | __ cnn_classifier_wiki_
12 | 
13 | 
14 | 
15 | 
16 | 
17 | 
18 | --------
19 | Training
20 | --------
21 | 
22 | **Train:**
23 | 
24 | The training can be run with the **train.sh** `bash script` using the following command:
25 | 
26 | .. code-block:: bash
27 | 
28 |     ./train.sh
29 | 
30 | The bash script is as below:
31 | 
32 | 
33 | .. code-block:: bash
34 | 
35 |     python train_classifier.py \
36 |         --batch_size=512 \
37 |         --max_num_checkpoint=10 \
38 |         --num_classes=10 \
39 |         --num_epochs=1 \
40 |         --initial_learning_rate=0.001 \
41 |         --num_epochs_per_decay=1 \
42 |         --is_training=True \
43 |         --allow_soft_placement=True \
44 |         --fine_tuning=False \
45 |         --online_test=True \
46 |         --log_device_placement=False
47 | 
48 | **helper:**
49 | 
50 | To see what parameters can be passed as input, running the following command is recommended:
51 | 
52 | .. code-block:: bash
53 | 
54 |     python train_classifier.py --help
55 | 
56 | 
57 | Here, `train_classifier.py` is the main file for running the training. The output of the above command will be as below:
58 | 
59 | .. code-block:: bash
60 | 
61 |     --train_dir TRAIN_DIR
62 |         Directory where event logs are written to.
63 |     --checkpoint_dir CHECKPOINT_DIR
64 |         Directory where checkpoints are written to.
65 |     --max_num_checkpoint MAX_NUM_CHECKPOINT
66 |         Maximum number of checkpoints that TensorFlow will
67 |         keep.
68 |     --num_classes NUM_CLASSES
69 |         Number of model clones to deploy.
70 |     --batch_size BATCH_SIZE
71 |         Number of model clones to deploy.
72 |     --num_epochs NUM_EPOCHS
73 |         Number of epochs for training.
74 |     --initial_learning_rate INITIAL_LEARNING_RATE
75 |         Initial learning rate.
76 |     --learning_rate_decay_factor LEARNING_RATE_DECAY_FACTOR
77 |         Learning rate decay factor.
78 |     --num_epochs_per_decay NUM_EPOCHS_PER_DECAY
79 |         Number of epoch pass to decay learning rate.
80 |     --is_training [IS_TRAINING]
81 |         Training/Testing.
82 |     --fine_tuning [FINE_TUNING]
83 |         Fine tuning is desired or not?.
84 |     --online_test [ONLINE_TEST]
85 |         Fine tuning is desired or not?.
86 |     --allow_soft_placement [ALLOW_SOFT_PLACEMENT]
87 |         Automatically put the variables on CPU if there is no
88 |         GPU support.
89 |     --log_device_placement [LOG_DEVICE_PLACEMENT]
90 |         Demonstrate which variables are on what device.
91 | 
92 | 
93 | -----------
94 | Evaluation
95 | -----------
96 | 
97 | The evaluation can be run with the **evaluation.sh** `bash script` using the following command:
98 | 
99 | .. code-block:: bash
100 | 
101 |     ./evaluation.sh
102 | 
103 | 
104 | 
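105 | The corresponding bash script (the content of ``evaluation.sh``) is as below:
106 | 
107 | .. code-block:: bash
108 | 
109 |     python test_classifier.py \
110 |         --batch_size=512 \
111 |         --allow_soft_placement
112 | 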
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/convolutional-neural-network/requirements.txt:
--------------------------------------------------------------------------------
1 | - TensorFlow 1.0
2 | - numpy
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/multi-layer-perceptron/code/evaluation.sh:
--------------------------------------------------------------------------------
1 | 
2 | # Run evaluation.
3 | python test_classifier.py \
4 |     --batch_size=512 \
5 |     --allow_soft_placement
6 | 
7 | 
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/multi-layer-perceptron/code/train.sh:
--------------------------------------------------------------------------------
1 | 
2 | # Run training.
3 | python train_mlp.py \
4 |     --batch_size=512 \
5 |     --max_num_checkpoint=10 \
6 |     --num_classes=10 \
7 |     --num_epochs=1 \
8 |     --initial_learning_rate=0.001 \
9 |     --num_epochs_per_decay=1 \
10 |     --is_training=True \
11 |     --allow_soft_placement=True \
12 |     --fine_tuning=False \
13 |     --online_test=True \
14 |     --log_device_placement=False
15 | 
16 | 
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/multi-layer-perceptron/readme.rst:
--------------------------------------------------------------------------------
1 | =========================
2 | Multi Layer Perceptron
3 | =========================
4 | 
5 | This code is developed for training a ``Multi Layer Perceptron`` architecture, in which the input is fed forward through a network that contains several hidden layers.
6 | 
7 | .. figure:: https://github.com/astorfi/TensorFlow-World/blob/master/docs/_img/3-neural_network/multi-layer-perceptron/neural-network.png
8 |    :scale: 50 %
9 |    :align: center
10 | 
11 | 
12 | --------
13 | Training
14 | --------
15 | 
16 | **Train:**
17 | 
18 | The training can be run with the **train.sh** `bash script` using the following command:
19 | 
20 | .. code-block:: bash
21 | 
22 |     ./train.sh
23 | 
24 | The bash script is as below:
25 | 
26 | 
27 | .. 
code-block:: bash
28 | 
29 |     python train_mlp.py \
30 |         --batch_size=512 \
31 |         --max_num_checkpoint=10 \
32 |         --num_classes=10 \
33 |         --num_epochs=1 \
34 |         --initial_learning_rate=0.001 \
35 |         --num_epochs_per_decay=1 \
36 |         --is_training=True \
37 |         --allow_soft_placement=True \
38 |         --fine_tuning=False \
39 |         --online_test=True \
40 |         --log_device_placement=False
41 | 
42 | **helper:**
43 | 
44 | To see what parameters can be passed as input, running the following command is recommended:
45 | 
46 | .. code-block:: bash
47 | 
48 |     python train_mlp.py --help
49 | 
50 | 
51 | Here, `train_mlp.py` is the main file for running the training. The output of the above command will be as below:
52 | 
53 | .. code-block:: bash
54 | 
55 |     --train_dir TRAIN_DIR
56 |         Directory where event logs are written to.
57 |     --checkpoint_dir CHECKPOINT_DIR
58 |         Directory where checkpoints are written to.
59 |     --max_num_checkpoint MAX_NUM_CHECKPOINT
60 |         Maximum number of checkpoints that TensorFlow will
61 |         keep.
62 |     --num_classes NUM_CLASSES
63 |         Number of model clones to deploy.
64 |     --batch_size BATCH_SIZE
65 |         Number of model clones to deploy.
66 |     --num_epochs NUM_EPOCHS
67 |         Number of epochs for training.
68 |     --initial_learning_rate INITIAL_LEARNING_RATE
69 |         Initial learning rate.
70 |     --learning_rate_decay_factor LEARNING_RATE_DECAY_FACTOR
71 |         Learning rate decay factor.
72 |     --num_epochs_per_decay NUM_EPOCHS_PER_DECAY
73 |         Number of epoch pass to decay learning rate.
74 |     --is_training [IS_TRAINING]
75 |         Training/Testing.
76 |     --fine_tuning [FINE_TUNING]
77 |         Fine tuning is desired or not?.
78 |     --online_test [ONLINE_TEST]
79 |         Fine tuning is desired or not?.
80 |     --allow_soft_placement [ALLOW_SOFT_PLACEMENT]
81 |         Automatically put the variables on CPU if there is no
82 |         GPU support.
83 |     --log_device_placement [LOG_DEVICE_PLACEMENT]
84 |         Demonstrate which variables are on what device.
85 | 
86 | 
87 | -----------
88 | Evaluation
89 | -----------
90 | 
91 | The evaluation can be run with the **evaluation.sh** `bash script` using the following command:
92 | 
93 | .. code-block:: bash
94 | 
95 |     ./evaluation.sh
96 | 
97 | 
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/multi-layer-perceptron/requirements.txt:
--------------------------------------------------------------------------------
1 | - TensorFlow 1.0
2 | - numpy
--------------------------------------------------------------------------------
/codes/ipython/3-neural_networks/readme.md:
--------------------------------------------------------------------------------
1 | Tutorials in TensorFlow related to neural network architectures.
2 | -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/recurrent-neural-networks/README.rst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/3-neural_networks/recurrent-neural-networks/README.rst -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/recurrent-neural-networks/code/MNIST_data/t10k-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/3-neural_networks/recurrent-neural-networks/code/MNIST_data/t10k-images-idx3-ubyte.gz -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/recurrent-neural-networks/code/MNIST_data/t10k-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/3-neural_networks/recurrent-neural-networks/code/MNIST_data/t10k-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/recurrent-neural-networks/code/MNIST_data/train-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/3-neural_networks/recurrent-neural-networks/code/MNIST_data/train-images-idx3-ubyte.gz -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/recurrent-neural-networks/code/MNIST_data/train-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/ipython/3-neural_networks/recurrent-neural-networks/code/MNIST_data/train-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /codes/ipython/3-neural_networks/recurrent-neural-networks/code/rnn.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Untitled0.ipynb", 7 | "version": "0.3.2", 8 | "provenance": [], 9 | "collapsed_sections": [] 10 | }, 11 | "kernelspec": { 12 | "name": "python3", 13 | "display_name": "Python 3" 14 | }, 15 | "accelerator": "GPU" 16 | }, 17 | "cells": [ 18 | { 19 | "metadata": { 20 | "id": "UBpbr4JZKYTz", 21 | "colab_type": "text" 22 | }, 23 | "cell_type": "markdown", 24 | "source": [ 25 | "# MNIST Digit Classification Using Recurrent Neural Networks" 26 | ] 27 | }, 28 | { 29 | "metadata": { 30 | "id": "CxeZAiQkLMNR", 31 | "colab_type": "code", 32 | "colab": { 33 | "base_uri": "https://localhost:8080/", 34 | "height": 704 35 | }, 36 | "outputId": "77ad5785-4e41-40d0-896e-ed9e6873b4ae" 37 | }, 38 | "cell_type": "code", 39 | "source": [ 40 | "import tensorflow as tf\n", 41 | "import numpy as np\n", 42 | "import matplotlib.pyplot as plt\n", 43 | "import argparse\n", 44 | "\n", 45 | 
"######################\n", 46 | "# Optimization Flags #\n", 47 | "######################\n", 48 | "\n", 49 | "learning_rate = 0.001 # initial learning rate\n", 50 | "seed = 111\n", 51 | "\n", 52 | "##################\n", 53 | "# Training Flags #\n", 54 | "##################\n", 55 | "batch_size = 128 # Batch size for training\n", 56 | "num_epoch = 10 # Number of training iterations\n", 57 | "\n", 58 | "###############\n", 59 | "# Model Flags #\n", 60 | "###############\n", 61 | "hidden_size = 128 # Number of neurons for RNN hodden layer\n", 62 | "\n", 63 | "# Reset the graph set the random numbers to be the same using \"seed\"\n", 64 | "tf.reset_default_graph()\n", 65 | "tf.set_random_seed(seed)\n", 66 | "np.random.seed(seed)\n", 67 | "\n", 68 | "# Divide 28x28 images to rows of data to feed to RNN as sequantial information\n", 69 | "step_size = 28\n", 70 | "input_size = 28\n", 71 | "output_size = 10\n", 72 | "\n", 73 | "# Input tensors\n", 74 | "X = tf.placeholder(tf.float32, [None, step_size, input_size])\n", 75 | "y = tf.placeholder(tf.int32, [None])\n", 76 | "\n", 77 | "# Rnn\n", 78 | "cell = tf.nn.rnn_cell.BasicRNNCell(num_units=hidden_size)\n", 79 | "output, state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n", 80 | "\n", 81 | "# Forward pass and loss calcualtion\n", 82 | "logits = tf.layers.dense(state, output_size)\n", 83 | "cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\n", 84 | "loss = tf.reduce_mean(cross_entropy)\n", 85 | "\n", 86 | "# optimizer\n", 87 | "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n", 88 | "\n", 89 | "# Prediction\n", 90 | "prediction = tf.nn.in_top_k(logits, y, 1)\n", 91 | "accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))\n", 92 | "\n", 93 | "# input data\n", 94 | "from tensorflow.examples.tutorials.mnist import input_data\n", 95 | "mnist = input_data.read_data_sets(\"MNIST_data/\")\n", 96 | "\n", 97 | "# Process MNIST\n", 98 | "X_test = mnist.test.images # X_test shape: [num_test, 28*28]\n", 99 | "X_test = X_test.reshape([-1, step_size, input_size])\n", 100 | "y_test = mnist.test.labels\n", 101 | "\n", 102 | "# initialize the variables\n", 103 | "init = tf.global_variables_initializer()\n", 104 | "\n", 105 | "# Empty list for tracking\n", 106 | "loss_train_list = []\n", 107 | "acc_train_list = []\n", 108 | "\n", 109 | "# train the model\n", 110 | "with tf.Session() as sess:\n", 111 | " sess.run(init)\n", 112 | " n_batches = mnist.train.num_examples // batch_size\n", 113 | " for epoch in range(num_epoch):\n", 114 | " for batch in range(n_batches):\n", 115 | " X_train, y_train = mnist.train.next_batch(batch_size)\n", 116 | " X_train = X_train.reshape([-1, step_size, input_size])\n", 117 | " sess.run(optimizer, feed_dict={X: X_train, y: y_train})\n", 118 | " loss_train, acc_train = sess.run(\n", 119 | " [loss, accuracy], feed_dict={X: X_train, y: y_train})\n", 120 | " loss_train_list.append(loss_train)\n", 121 | " acc_train_list.append(acc_train)\n", 122 | " print('Epoch: {}, Train Loss: {:.3f}, Train Acc: {:.3f}'.format(\n", 123 | " epoch + 1, loss_train, acc_train))\n", 124 | " loss_test, acc_test = sess.run(\n", 125 | " [loss, accuracy], feed_dict={X: X_test, y: y_test})\n", 126 | " print('Test Loss: {:.3f}, Test Acc: {:.3f}'.format(loss_test, acc_test))\n" 127 | ], 128 | "execution_count": 2, 129 | "outputs": [ 130 | { 131 | "output_type": "stream", 132 | "text": [ 133 | "WARNING:tensorflow:From :56: read_data_sets (from 
tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 134 | "Instructions for updating:\n", 135 | "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n", 136 | "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n", 137 | "Instructions for updating:\n", 138 | "Please write your own downloading logic.\n", 139 | "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/base.py:252: _internal_retry..wrap..wrapped_fn (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n", 140 | "Instructions for updating:\n", 141 | "Please use urllib or similar directly.\n", 142 | "Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\n", 143 | "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 144 | "Instructions for updating:\n", 145 | "Please use tf.data to implement this functionality.\n", 146 | "Extracting MNIST_data/train-images-idx3-ubyte.gz\n", 147 | "Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\n", 148 | "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 149 | "Instructions for updating:\n", 150 | "Please use tf.data to implement this functionality.\n", 151 | "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n", 152 | "Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\n", 153 | "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n", 154 | "Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\n", 155 | "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n", 156 | "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 157 | "Instructions for updating:\n", 158 | "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n", 159 | "Epoch: 1, Train Loss: 0.279, Train Acc: 0.898\n", 160 | "Epoch: 2, Train Loss: 0.124, Train Acc: 0.969\n", 161 | "Epoch: 3, Train Loss: 0.145, Train Acc: 0.977\n", 162 | "Epoch: 4, Train Loss: 0.231, Train Acc: 0.914\n", 163 | "Epoch: 5, Train Loss: 0.088, Train Acc: 0.961\n", 164 | "Epoch: 6, Train Loss: 0.104, Train Acc: 0.961\n", 165 | "Epoch: 7, Train Loss: 0.174, Train Acc: 0.961\n", 166 | "Epoch: 8, Train Loss: 0.099, Train Acc: 0.961\n", 167 | "Epoch: 9, Train Loss: 0.075, Train Acc: 0.961\n", 168 | "Epoch: 10, Train Loss: 0.081, Train Acc: 0.969\n", 169 | "Test Loss: 0.124, Test Acc: 0.965\n" 170 | ], 171 | "name": "stdout" 172 | } 173 | ] 174 | }, 175 | { 176 | "metadata": { 177 | "id": "nkPppIILLN5Z", 178 | "colab_type": "code", 179 | "colab": {} 180 | }, 181 | "cell_type": "code", 182 | "source": [ 183 | "" 184 | ], 185 | "execution_count": 0, 186 | "outputs": [] 187 | } 188 | ] 189 | } 
--------------------------------------------------------------------------------
/codes/python/0-welcome/README.rst:
--------------------------------------------------------------------------------
1 | 
2 | ===========================
3 | Welcome to TensorFlow World
4 | ===========================
5 | 
6 | This document is dedicated to explaining how to run the Python script for this tutorial.
7 | 
8 | ---------------------------
9 | Test TensorFlow Environment
10 | ---------------------------
11 | 
12 | ``WARNING:`` If TensorFlow is installed in any environment (virtual environment, ...), it must be activated first. So, before anything else, make sure TensorFlow is available in the current environment by running the following script:
13 | 
14 | .. code:: shell
15 | 
16 |     cd code/
17 |     python TensorFlow_Test.py
18 | 
19 | --------------------------------
20 | How to run the code in Terminal?
21 | --------------------------------
22 | 
23 | 
24 | Please go to the ``code/`` directory and run the Python script in the general form below:
25 | 
26 | .. code:: shell
27 | 
28 |     python [python_code_file.py] --log_dir='absolute/path/to/log_dir'
29 | 
30 | 
31 | As an example, the code can be executed as follows:
32 | 
33 | .. code:: shell
34 | 
35 |     python 0-welcome.py --log_dir='~/log_dir'
36 | 
37 | The ``--log_dir`` flag provides the address where the event files (for visualization in TensorBoard) will be saved. The ``--log_dir`` flag is not required, because its default value is available in the source code as follows:
38 | 
39 | .. code:: python
40 | 
41 |     tf.app.flags.DEFINE_string(
42 |         'log_dir', os.path.dirname(os.path.abspath(__file__)) + '/logs',
43 |         'Directory where event logs are written to.')
44 | 
45 | ----------------------------
46 | How to run the code in IDEs?
47 | ----------------------------
48 | 
49 | Since the code is ready to go, as long as TensorFlow can be called in the IDE editor (PyCharm, Spyder, ...), the code can be executed successfully.
50 | 
51 | 
52 | ---------------------------
53 | How to run the Tensorboard?
54 | ---------------------------
55 | .. _Google’s words: https://www.tensorflow.org/get_started/summaries_and_tensorboard
56 | TensorBoard is the graph visualization tool provided by TensorFlow. Using `Google’s words`_: “The computations you'll use TensorFlow for - like training a massive deep neural network - can be complex and confusing. To make it easier to understand,
57 | debug, and optimize TensorFlow programs, we've included a suite of visualization tools called
58 | TensorBoard.”
59 | 
60 | The Tensorboard can be run as follows in the terminal:
61 | 
62 | .. code:: shell
63 | 
64 |     tensorboard --logdir="absolute/path/to/log_dir"
65 | 
66 | 
67 | 
68 | 
69 | 
70 | 
--------------------------------------------------------------------------------
/codes/python/0-welcome/code/0-welcome.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | ########## Welcome to TensorFlow World ##############
3 | #####################################################
4 | 
5 | # The tutorials in this section are just a start for going into the TensorFlow world.
6 | # This version of the script uses a plain log_dir variable rather than TensorFlow flags.
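# (Added note: the script below simply builds a constant op, evaluates it in a
# session, and writes the graph summary for TensorBoard into log_dir.)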
7 | 
8 | from __future__ import print_function
9 | import tensorflow as tf
10 | import os
11 | 
12 | 
13 | ######################################
14 | ######### Necessary Paths ############
15 | ######################################
16 | 
17 | log_dir = os.path.dirname(os.path.abspath(__file__)) + '/logs'
18 | 
19 | ################################################
20 | ############# Defining the graph ###############
21 | ################################################
22 | 
23 | # Defining a welcome message as a constant op.
24 | welcome = tf.constant('Welcome to TensorFlow world!')
25 | 
26 | # Run the session.
27 | with tf.Session() as sess:
28 |     writer = tf.summary.FileWriter(os.path.expanduser(log_dir), sess.graph)
29 |     print("output: ", sess.run(welcome))
30 | 
31 |     # Closing the writer.
32 |     writer.close()
33 |     sess.close()
34 | 
35 | 
--------------------------------------------------------------------------------
/codes/python/0-welcome/code/TensorFlow_Test.py:
--------------------------------------------------------------------------------
1 | # This code has been provided by TensorFlow.
2 | # Please refer to: https://www.tensorflow.org/api_guides/python/test
3 | 
4 | import tensorflow as tf
5 | 
6 | class SquareTest(tf.test.TestCase):
7 | 
8 |     def testSquare(self):
9 |         with self.test_session():
10 |             x = tf.square([2, 3])
11 |             self.assertAllEqual(x.eval(), [4, 9])
12 | 
13 | if __name__ == '__main__':
14 |     tf.test.main()
--------------------------------------------------------------------------------
/codes/python/1-basics/basic_math_operations/README.rst:
--------------------------------------------------------------------------------
1 | 
2 | ======================
3 | Basic Math Operations
4 | ======================
5 | 
6 | This document is dedicated to explaining how to run the Python script for this tutorial.
7 | 
8 | 
9 | --------------------------------
10 | How to run the code in Terminal?
11 | --------------------------------
12 | 
13 | 
14 | Please go to the ``code/`` directory and run the Python script in the general form below:
15 | 
16 | .. code:: shell
17 | 
18 |     python [python_code_file.py] --log_dir='absolute/path/to/log_dir'
19 | 
20 | 
21 | As an example, the code can be executed as follows:
22 | 
23 | .. code:: shell
24 | 
25 |     python basic_math_operation.py --log_dir='~/log_dir'
26 | 
27 | The ``--log_dir`` flag provides the address where the event files (for visualization in TensorBoard) will be saved. The ``--log_dir`` flag is not required, because its default value is available in the source code as follows:
28 | 
29 | .. code:: python
30 | 
31 |     tf.app.flags.DEFINE_string(
32 |         'log_dir', os.path.dirname(os.path.abspath(__file__)) + '/logs',
33 |         'Directory where event logs are written to.')
34 | 
35 | ----------------------------
36 | How to run the code in IDEs?
37 | ----------------------------
38 | 
39 | Since the code is ready to go, as long as TensorFlow can be called in the IDE editor (PyCharm, Spyder, ...), the code can be executed successfully.
40 | 
41 | 
42 | ----------------------------
43 | How to run the Tensorboard?
44 | ----------------------------
45 | .. _Google’s words: https://www.tensorflow.org/get_started/summaries_and_tensorboard
46 | TensorBoard is the graph visualization tool provided by TensorFlow. Using `Google’s words`_: “The computations you'll use TensorFlow for - like training a massive deep neural network - can be complex and confusing.
To make it easier to understand, 47 | debug, and optimize TensorFlow programs, we've included a suite of visualization tools called 48 | TensorBoard.” 49 | 50 | The Tensorboard can be run as follows in the terminal: 51 | 52 | .. code:: shell 53 | 54 | tensorboard --logdir="absolute/path/to/log_dir" 55 | 56 | 57 | 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /codes/python/1-basics/basic_math_operations/code/basic_math_operation.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | ########## Welcome to TensorFlow World ############## 3 | ##################################################### 4 | 5 | # The tutorials in this section is just a start for math operations. 6 | # The TensorFlow flags are used for having a more user friendly environment. 7 | 8 | from __future__ import print_function 9 | import tensorflow as tf 10 | import os 11 | 12 | 13 | ###################################### 14 | ######### Necessary Flags ############ 15 | # #################################### 16 | 17 | # The default path for saving event files is the same folder of this python file. 18 | tf.app.flags.DEFINE_string( 19 | 'log_dir', os.path.dirname(os.path.abspath(__file__)) + '/logs', 20 | 'Directory where event logs are written to.') 21 | 22 | # Store all elemnts in FLAG structure! 23 | FLAGS = tf.app.flags.FLAGS 24 | 25 | ################################################ 26 | ################# handling errors!############## 27 | ################################################ 28 | 29 | # The user is prompted to input an absolute path. 30 | # os.path.expanduser is leveraged to transform '~' sign to the corresponding path indicator. 31 | # Example: '~/logs' equals to '/home/username/logs' 32 | if not os.path.isabs(os.path.expanduser(FLAGS.log_dir)): 33 | raise ValueError('You must assign absolute path for --log_dir') 34 | 35 | 36 | # Defining some constant values 37 | a = tf.constant(5.0, name="a") 38 | b = tf.constant(10.0, name="b") 39 | 40 | # Some basic operations 41 | x = tf.add(a, b, name="add") 42 | y = tf.div(a, b, name="divide") 43 | 44 | # Run the session 45 | with tf.Session() as sess: 46 | writer = tf.summary.FileWriter(os.path.expanduser(FLAGS.log_dir), sess.graph) 47 | print("a =", sess.run(a)) 48 | print("b =", sess.run(b)) 49 | print("a + b =", sess.run(x)) 50 | print("a/b =", sess.run(y)) 51 | 52 | # Closing the writer. 53 | writer.close() 54 | sess.close() 55 | 56 | -------------------------------------------------------------------------------- /codes/python/1-basics/readme.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | Basics 3 | ============================== 4 | 5 | 6 | -------------------------------------------------------------------------------- /codes/python/1-basics/variables/README.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | Variables 3 | ========== 4 | 5 | This source code is dedicated to define and initialize variables. 6 | 7 | 8 | -------------------------------- 9 | How to run the code in Terminal? 10 | -------------------------------- 11 | 12 | 13 | Please root to the ``code/`` directory and run the python script as the general form of below: 14 | 15 | .. code:: shell 16 | 17 | python [python_code_file.py] 18 | 19 | 20 | As an example the code can be executed as follows: 21 | 22 | .. 
code:: shell
23 | 
24 |     python variables.py
25 | 
26 | ----------------------------
27 | How to run the code in IDEs?
28 | ----------------------------
29 | 
30 | Since the code is ready to go, as long as TensorFlow can be called in the IDE editor (PyCharm, Spyder, ...), the code can be executed successfully.
31 | 
32 | 
33 | 
34 | 
35 | 
36 | 
--------------------------------------------------------------------------------
/codes/python/1-basics/variables/code/variables.py:
--------------------------------------------------------------------------------
1 | ## This code creates some arbitrary variables and initializes them ###
2 | # The goal is to show how to define and initialize variables from scratch.
3 | 
4 | import tensorflow as tf
5 | from tensorflow.python.framework import ops
6 | 
7 | #######################################
8 | ######## Defining Variables ###########
9 | #######################################
10 | 
11 | # Create three variables with some default values.
12 | weights = tf.Variable(tf.random_normal([2, 3], stddev=0.1),
13 |                       name="weights")
14 | biases = tf.Variable(tf.zeros([3]), name="biases")
15 | custom_variable = tf.Variable(tf.zeros([3]), name="custom")
16 | 
17 | # Get all the variables' tensors and store them in a list.
18 | all_variables_list = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
19 | 
20 | 
21 | ############################################
22 | ######## Customized initializer ############
23 | ############################################
24 | 
25 | ## Initialization of some custom variables.
26 | ## In this part we choose some variables and initialize only them, rather than initializing all variables.
27 | 
28 | # "variable_list_custom" is the list of variables that we want to initialize.
29 | variable_list_custom = [weights, custom_variable]
30 | 
31 | # The initializer
32 | init_custom_op = tf.variables_initializer(var_list=variable_list_custom)
33 | 
34 | 
35 | ########################################
36 | ######## Global initializer ############
37 | ########################################
38 | 
39 | # Method-1
40 | # Add an op to initialize the variables.
41 | init_all_op = tf.global_variables_initializer()
42 | 
43 | # Method-2
44 | init_all_op = tf.variables_initializer(var_list=all_variables_list)
45 | 
46 | 
47 | 
48 | ##########################################################
49 | ######## Initialization using other variables ############
50 | ##########################################################
51 | 
52 | # Create another variable with the same value as 'weights'.
53 | WeightsNew = tf.Variable(weights.initialized_value(), name="WeightsNew")
54 | 
55 | # Now, the variable must be initialized.
56 | init_WeightsNew_op = tf.variables_initializer(var_list=[WeightsNew])
57 | 
58 | ######################################
59 | ####### Running the session ##########
60 | ######################################
61 | with tf.Session() as sess:
62 |     # Run the initializer operations.
63 |     sess.run(init_all_op)
64 |     sess.run(init_custom_op)
65 |     sess.run(init_WeightsNew_op)
--------------------------------------------------------------------------------
/codes/python/2-basics_in_machine_learning/linear_regression/README.rst:
--------------------------------------------------------------------------------
1 | ==================
2 | Linear Regression
3 | ==================
4 | 
5 | This document is dedicated to explaining how to run the Python script for this tutorial. The documentation is available `here `_.
Alternatively, you can check this ``Linear Regression using TensorFlow`` `blog post `_ for further details. 6 | 7 | .. _blogpostlinearregression: http://www.machinelearninguru.com/deep_learning/tensorflow/machine_learning_basics/linear_regresstion/linear_regression.html 8 | 9 | .. _Documentationlinearregression: https://github.com/astorfi/TensorFlow-World/wiki/Linear-Regeression 10 | 11 | ------------------- 12 | Python Environment 13 | ------------------- 14 | 15 | ``WARNING:`` If TensorFlow is installed in any environment(virtual environment, ...), it must be activated at first. So at first make sure the tensorFlow is available in the current environment using the following script: 16 | 17 | -------------------------------- 18 | How to run the code in Terminal? 19 | -------------------------------- 20 | 21 | 22 | Please root to the ``code/`` directory and run the python script as the general form of below: 23 | 24 | .. code:: shell 25 | 26 | python [python_code_file.py] 27 | 28 | 29 | As an example the code can be executed as follows: 30 | 31 | .. code:: shell 32 | 33 | python linear_regression.py --num_epochs=50 34 | 35 | The ``--num_epochs`` flag is to provide the number of epochs that will be used for training. The ``--num_epochs`` flag is not required because its default value is ``50`` and is provided in the source code as follows: 36 | 37 | .. code:: python 38 | 39 | tf.app.flags.DEFINE_integer( 40 | 'num_epochs', 50, 'The number of epochs for training the model. Default=50') 41 | 42 | ---------------------------- 43 | How to run the code in IDEs? 44 | ---------------------------- 45 | 46 | Since the code is ready-to-go, as long as the TensorFlow can be called in the IDE editor(Pycharm, Spyder,..), the code can be executed successfully. 47 | -------------------------------------------------------------------------------- /codes/python/2-basics_in_machine_learning/linear_regression/code/linear_regression.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | import xlrd 4 | import matplotlib.pyplot as plt 5 | import os 6 | from sklearn.utils import check_random_state 7 | 8 | # Generating artificial data. 9 | n = 50 10 | XX = np.arange(n) 11 | rs = check_random_state(0) 12 | YY = rs.randint(-20, 20, size=(n,)) + 2.0 * XX 13 | data = np.stack([XX,YY], axis=1) 14 | 15 | ####################### 16 | ## Defining flags ##### 17 | ####################### 18 | tf.app.flags.DEFINE_integer('num_epochs', 50, 'The number of epochs for training the model. Default=50') 19 | # Store all elemnts in FLAG structure! 20 | FLAGS = tf.app.flags.FLAGS 21 | 22 | 23 | # creating the weight and bias. 24 | # The defined variables will be initialized to zero. 25 | W = tf.Variable(0.0, name="weights") 26 | b = tf.Variable(0.0, name="bias") 27 | 28 | 29 | # Creating placeholders for input X and label Y. 30 | def inputs(): 31 | """ 32 | Defining the place_holders. 33 | :return: 34 | Returning the data and label place holders. 35 | """ 36 | X = tf.placeholder(tf.float32, name="X") 37 | Y = tf.placeholder(tf.float32, name="Y") 38 | return X,Y 39 | 40 | # Create the prediction. 41 | def inference(X): 42 | """ 43 | Forward passing the X. 44 | :param X: Input. 45 | :return: X*W + b. 46 | """ 47 | return X * W + b 48 | 49 | def loss(X, Y): 50 | ''' 51 | compute the loss by comparing the predicted value to the actual label. 52 | :param X: The input. 53 | :param Y: The label. 54 | :return: The loss over the samples. 
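    In other words, the value computed below is sum((Y - Y_predicted)^2) / (2 * n),
    i.e. half of the mean squared error over the n = 50 generated samples.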
55 | ''' 56 | 57 | # Making the prediction. 58 | Y_predicted = inference(X) 59 | return tf.reduce_sum(tf.squared_difference(Y, Y_predicted))/(2*data.shape[0]) 60 | 61 | 62 | # The training function. 63 | def train(loss): 64 | learning_rate = 0.0001 65 | return tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) 66 | 67 | 68 | with tf.Session() as sess: 69 | 70 | # Initialize the variables[w and b]. 71 | sess.run(tf.global_variables_initializer()) 72 | 73 | # Get the input tensors 74 | X, Y = inputs() 75 | 76 | # Return the train loss and create the train_op. 77 | train_loss = loss(X, Y) 78 | train_op = train(train_loss) 79 | 80 | # Step 8: train the model 81 | for epoch_num in range(FLAGS.num_epochs): # run 100 epochs 82 | loss_value, _ = sess.run([train_loss,train_op], 83 | feed_dict={X: data[:,0], Y: data[:,1]}) 84 | 85 | # Displaying the loss per epoch. 86 | print('epoch %d, loss=%f' %(epoch_num+1, loss_value)) 87 | 88 | # save the values of weight and bias 89 | wcoeff, bias = sess.run([W, b]) 90 | 91 | 92 | ############################### 93 | #### Evaluate and plot ######## 94 | ############################### 95 | Input_values = data[:,0] 96 | Labels = data[:,1] 97 | Prediction_values = data[:,0] * wcoeff + bias 98 | 99 | # # uncomment if plotting is desired! 100 | # plt.plot(Input_values, Labels, 'ro', label='main') 101 | # plt.plot(Input_values, Prediction_values, label='Predicted') 102 | 103 | # # Saving the result. 104 | # plt.legend() 105 | # plt.savefig('plot.png') 106 | # plt.close() 107 | -------------------------------------------------------------------------------- /codes/python/2-basics_in_machine_learning/linear_regression/updating_model.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/2-basics_in_machine_learning/linear_regression/updating_model.gif -------------------------------------------------------------------------------- /codes/python/2-basics_in_machine_learning/linear_svm/README.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | Linear SVM 3 | =========== 4 | 5 | This document is dedicated to explain how to run the python script for this tutorial. For this tutorial, we will create a linear SVM for separation of the data. The data that is used for this code is linearly separable. 6 | 7 | ------------------- 8 | Python Environment 9 | ------------------- 10 | 11 | ``WARNING:`` If TensorFlow is installed in any environment(virtual environment, ...), it must be activated at first. So at first make sure the tensorFlow is available in the current environment using the following script: 12 | 13 | -------------------------------- 14 | How to run the code in Terminal? 15 | -------------------------------- 16 | 17 | 18 | Please root to the ``code/`` directory and run the python script as the general form of below: 19 | 20 | .. code:: shell 21 | 22 | python [python_code_file.py] 23 | 24 | 25 | As an example the code can be executed as follows: 26 | 27 | .. code:: shell 28 | 29 | python linear_svm.py 30 | 31 | ---------------------------- 32 | How to run the code in IDEs? 33 | ---------------------------- 34 | 35 | Since the code is ready-to-go, as long as the TensorFlow can be called in the IDE editor(Pycharm, Spyder,..), the code can be executed successfully. 
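
For reference, the objective minimized in ``linear_svm.py`` is the soft-margin SVM loss ``C_param * mean(max(0, delta - y * (x.W - b))) + Reg_param * ||W||^2 / 2``, where the coefficients are the flags defined at the top of the script.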
36 |
-------------------------------------------------------------------------------- /codes/python/2-basics_in_machine_learning/linear_svm/code/linear_svm.py: --------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import tensorflow as tf
4 | from sklearn import datasets
5 | import random
6 | import sys
7 |
8 |
9 | #######################
10 | ### Necessary Flags ###
11 | #######################
12 |
13 | tf.app.flags.DEFINE_integer('batch_size', 32,
14 |                             'Number of samples per batch.')
15 |
16 | tf.app.flags.DEFINE_integer('num_steps', 500,
17 |                             'Number of steps for training.')
18 |
19 | tf.app.flags.DEFINE_boolean('is_evaluation', True,
20 |                             'Whether or not the model should be evaluated.')
21 |
22 | tf.app.flags.DEFINE_float(
23 |     'C_param', 0.1,
24 |     'penalty parameter of the error term.')
25 |
26 | tf.app.flags.DEFINE_float(
27 |     'Reg_param', 1.0,
28 |     'regularization parameter of the loss.')
29 |
30 | tf.app.flags.DEFINE_float(
31 |     'delta', 1.0,
32 |     'The parameter set for margin.')
33 |
34 | tf.app.flags.DEFINE_float(
35 |     'initial_learning_rate', 0.1,
36 |     'The initial learning rate for optimization.')
37 |
38 | FLAGS = tf.app.flags.FLAGS
39 |
40 |
41 | ##########################
42 | ### Required Functions ###
43 | ##########################
44 |
45 | def loss_fn(W, b, x_data, y_target):
46 |     logits = tf.subtract(tf.matmul(x_data, W), b)
47 |     norm_term = tf.divide(tf.reduce_sum(tf.multiply(tf.transpose(W), W)), 2)
48 |     classification_loss = tf.reduce_mean(tf.maximum(0., tf.subtract(FLAGS.delta, tf.multiply(logits, y_target))))
49 |     total_loss = tf.add(tf.multiply(FLAGS.C_param, classification_loss), tf.multiply(FLAGS.Reg_param, norm_term))
50 |     return total_loss
51 |
52 | def inference_fn(W, b, x_data, y_target):
53 |     prediction = tf.sign(tf.subtract(tf.matmul(x_data, W), b))
54 |     accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
55 |     return accuracy
56 |
57 | def next_batch_fn(x_train, y_train, num_samples=FLAGS.batch_size):
58 |     index = np.random.choice(len(x_train), size=num_samples)
59 |     X_batch = x_train[index]
60 |     y_batch = np.transpose([y_train[index]])
61 |     return X_batch, y_batch
62 |
63 | ###########################
64 | ### Dataset preparation ###
65 | ###########################
66 |
67 | # Dataset loading and organizing.
68 | iris = datasets.load_iris()
69 |
70 | # Only the first two features are extracted and used.
71 | X = iris.data[:, :2]
72 |
73 | # The labels are transformed to -1 and 1.
74 | y = np.array([1 if label == 0 else -1 for label in iris.target])
75 |
76 | # Get the indices for the train and test sets.
77 | my_randoms = np.random.choice(X.shape[0], X.shape[0], replace=False)
78 | train_indices = my_randoms[0:int(0.5 * X.shape[0])]
79 | test_indices = my_randoms[int(0.5 * X.shape[0]):]
80 |
81 | # Splitting train and test sets.
82 | x_train = X[train_indices]
83 | y_train = y[train_indices]
84 | x_test = X[test_indices]
85 | y_test = y[test_indices]
86 |
87 | #############################
88 | ### Defining Placeholders ###
89 | #############################
90 |
91 | x_data = tf.placeholder(shape=[None, X.shape[1]], dtype=tf.float32)
92 | y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
93 | W = tf.Variable(tf.random_normal(shape=[X.shape[1], 1]))
94 | b = tf.Variable(tf.random_normal(shape=[1, 1]))
95 |
96 | # Calculation of loss and accuracy.
97 | total_loss = loss_fn(W, b, x_data, y_target)
98 | accuracy = inference_fn(W, b, x_data, y_target)
99 |
100 | # Defining train_op
101 | train_op = tf.train.GradientDescentOptimizer(FLAGS.initial_learning_rate).minimize(total_loss)
102 |
103 | ###############
104 | ### Session ###
105 | ###############
106 | sess = tf.Session()
107 |
108 | # Initialization of the variables.
109 | init = tf.global_variables_initializer()
110 | sess.run(init)
111 |
112 | ###############################
113 | ### Training the Linear SVM ###
114 | ###############################
115 | for step_idx in range(FLAGS.num_steps):
116 |
117 |     # Get the batch of data.
118 |     X_batch, y_batch = next_batch_fn(x_train, y_train, num_samples=FLAGS.batch_size)
119 |
120 |     # Run the optimizer.
121 |     sess.run(train_op, feed_dict={x_data: X_batch, y_target: y_batch})
122 |
123 |     # Calculation of loss and accuracy.
124 |     loss_step = sess.run(total_loss, feed_dict={x_data: X_batch, y_target: y_batch})
125 |     train_acc_step = sess.run(accuracy, feed_dict={x_data: x_train, y_target: np.transpose([y_train])})
126 |     test_acc_step = sess.run(accuracy, feed_dict={x_data: x_test, y_target: np.transpose([y_test])})
127 |
128 |     # Displaying the desired values.
129 |     if step_idx % 100 == 0:
130 |         print('Step #%d, training accuracy= %% %.2f, testing accuracy= %% %.2f ' % (step_idx, float(100 * train_acc_step), float(100 * test_acc_step)))
131 |
132 | if FLAGS.is_evaluation:
133 |     [[w1], [w2]] = sess.run(W)
134 |     [[bias]] = sess.run(b)
135 |     x_line = [data[1] for data in X]
136 |
137 |     # Find the separator line.
138 |     line = [-w2/w1*i+bias/w1 for i in x_line]
139 |
140 |     # Collect the coordinates of the positive and negative samples.
141 |     positive_X, positive_y = [], []
142 |     negative_X, negative_y = [], []
143 |
144 |     for index, data in enumerate(X):
145 |         if y[index] == 1:
146 |             positive_X.append(data[1])
147 |             positive_y.append(data[0])
148 |         elif y[index] == -1:
149 |             negative_X.append(data[1])
150 |             negative_y.append(data[0])
151 |         else:
152 |             sys.exit("Invalid label!")
153 |
154 |     # # uncomment if plotting is desired!
155 |     # # Plotting the SVM decision boundary.
156 |     # plt.plot(positive_X, positive_y, '+', label='Positive')
157 |     # plt.plot(negative_X, negative_y, 'o', label='Negative')
158 |     # plt.plot(x_line, line, 'r-', label='Separator', linewidth=3)
159 |     # plt.legend(loc='best')
160 |     # plt.title('Linear SVM')
161 |     # plt.show()
162 |
163 |
164 |
165 |
166 |
167 |
168 |
-------------------------------------------------------------------------------- /codes/python/2-basics_in_machine_learning/logistic_regression/README.rst: --------------------------------------------------------------------------------
1 | ===================
2 | Logistic Regression
3 | ===================
4 |
5 | This document explains how to run the Python script for this tutorial. ``Logistic regression`` is a binary
6 | classification algorithm in which `yes` or `no` are the only possible responses. The linear output is transformed to a probability between zero and one, and the decision is made by thresholding that probability to choose a class. We use the ``Softmax`` formulation with a ``cross entropy`` loss for training.
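To make the decision rule concrete, here is a minimal sketch (an illustration added here, not from the original tutorial) of turning a linear output into a class decision:

.. code:: python

    import numpy as np

    def sigmoid(z):
        # Squashes the linear output into a probability in (0, 1).
        return 1.0 / (1.0 + np.exp(-z))

    z = 0.8                        # a hypothetical linear output w*x + b
    label = int(sigmoid(z) > 0.5)  # threshold the probability at 0.5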
7 |
8 | -------------------
9 | Python Environment
10 | -------------------
11 |
12 | ``WARNING:`` If TensorFlow is installed in any environment (virtual environment, etc.), that environment must be activated first. So, before running the code, make sure TensorFlow is available in the current environment.
13 |
14 | --------------------------------
15 | How to run the code in Terminal?
16 | --------------------------------
17 |
18 | Please navigate to the ``code/`` directory and run the Python script in the following general form:
19 |
20 | .. code:: shell
21 |
22 |     python [python_code_file.py]
23 |
24 |
25 | As an example, the code can be executed as follows:
26 |
27 | .. code:: shell
28 |
29 |     python logistic_regression.py --num_epochs=50 --batch_size=512 --max_num_checkpoint=10 --num_classes=2
30 |
31 | Different ``flags`` are provided for training. For the full list, please refer to the source code. The above invocation is only an example!
32 |
33 | ----------------------------
34 | How to run the code in IDEs?
35 | ----------------------------
36 |
37 | Since the code is ready to go, it can be executed successfully as long as TensorFlow can be imported in the IDE editor (PyCharm, Spyder, etc.).
38 |
-------------------------------------------------------------------------------- /codes/python/2-basics_in_machine_learning/multiclass_svm/README.rst: --------------------------------------------------------------------------------
1 | =======================
2 | Multi-Class Kernel SVM
3 | =======================
4 |
5 | This document explains how to run the Python script for this tutorial. In this tutorial, we create a kernel SVM for separating the data; the data used for this code is the MNIST dataset. This document is inspired by the `Implementing Multiclass SVMs <Multiclasssvm_>`_ open-source code. However, we extend it to the MNIST dataset and modify the method.
6 |
7 | .. _Multiclasssvm: https://github.com/nfmcclure/tensorflow_cookbook/tree/master/04_Support_Vector_Machines/06_Implementing_Multiclass_SVMs
8 |
9 |
10 | -------------------
11 | Python Environment
12 | -------------------
13 |
14 | ``WARNING:`` If TensorFlow is installed in any environment (virtual environment, etc.), that environment must be activated first. So, before running the code, make sure TensorFlow is available in the current environment.
15 |
16 | --------------------------------
17 | How to run the code in Terminal?
18 | --------------------------------
19 |
20 |
21 | Please navigate to the ``code/`` directory and run the Python script in the following general form:
22 |
23 | .. code:: shell
24 |
25 |     python [python_code_file.py]
26 |
27 |
28 | As an example, the code can be executed as follows:
29 |
30 | .. code:: shell
31 |
32 |     python multiclass_svm.py
33 |
34 | ----------------------------
35 | How to run the code in IDEs?
36 | ----------------------------
37 |
38 | Since the code is ready to go, it can be executed successfully as long as TensorFlow can be imported in the IDE editor (PyCharm, Spyder, etc.).
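For reference, the Gaussian (RBF) kernel the script constructs in TensorFlow has the form ``K(x_i, x_j) = exp(gamma * ||x_i - x_j||^2)`` with ``gamma < 0``. A minimal NumPy sketch (an illustration added here, not part of the original script; the default ``gamma`` matches the script's flag):

.. code:: python

    import numpy as np

    def rbf_kernel(X, gamma=-15.0):
        # Pairwise squared Euclidean distances via the expansion
        # ||xi - xj||^2 = ||xi||^2 - 2 * xi.xj + ||xj||^2.
        sq_norms = np.sum(X ** 2, axis=1)
        sq_dists = sq_norms[:, None] - 2.0 * X.dot(X.T) + sq_norms[None, :]
        return np.exp(gamma * sq_dists)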
39 |
-------------------------------------------------------------------------------- /codes/python/2-basics_in_machine_learning/multiclass_svm/code/multiclass_svm.py: --------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import tensorflow as tf
4 | from sklearn import datasets
5 | from tensorflow.python.framework import ops
6 | from tensorflow.examples.tutorials.mnist import input_data
7 | from sklearn.decomposition import PCA
8 |
9 | #######################
10 | ### Necessary Flags ###
11 | #######################
12 |
13 | tf.app.flags.DEFINE_integer('batch_size', 50,
14 |                             'Number of samples per batch.')
15 |
16 | tf.app.flags.DEFINE_integer('num_steps', 1000,
17 |                             'Number of steps for training.')
18 |
19 | tf.app.flags.DEFINE_integer('log_steps', 50,
20 |                             'Number of steps per each display.')
21 |
22 | tf.app.flags.DEFINE_boolean('is_evaluation', True,
23 |                             'Whether or not the model should be evaluated.')
24 |
25 | tf.app.flags.DEFINE_float(
26 |     'gamma', -15.0,
27 |     'The gamma parameter of the RBF kernel (negative for a valid kernel).')
28 |
29 | tf.app.flags.DEFINE_float(
30 |     'initial_learning_rate', 0.01,
31 |     'The initial learning rate for optimization.')
32 |
33 | FLAGS = tf.app.flags.FLAGS
34 |
35 |
36 | ###########################
37 | ### Necessary Functions ###
38 | ###########################
39 | def cross_class_label_fn(A):
40 |     """
41 |     This function takes the label matrix of size (num_classes, batch_size) and returns the cross-class label matrix,
42 |     whose elements Yij are the products of labels for sample indices i, j within each class.
43 |     :param A: The input matrix of size (num_classes, batch_size).
44 |     :return: The output matrix of size (num_classes, batch_size, batch_size).
45 |     """
46 |     label_class_i = tf.reshape(A, [num_classes, 1, FLAGS.batch_size])
47 |     label_class_j = tf.reshape(label_class_i, [num_classes, FLAGS.batch_size, 1])
48 |     returned_mat = tf.matmul(label_class_j, label_class_i)
49 |     return returned_mat
50 |
51 |
52 | # Compute SVM loss.
53 | def loss_fn(alpha, label_placeholder):
54 |     term_1 = tf.reduce_sum(alpha)
55 |     alpha_cross = tf.matmul(tf.transpose(alpha), alpha)
56 |     cross_class_label = cross_class_label_fn(label_placeholder)
57 |     term_2 = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(alpha_cross, cross_class_label)), [1, 2])
58 |     return tf.reduce_sum(tf.subtract(term_2, term_1))
59 |
60 |
61 | # Gaussian (RBF) prediction kernel
62 | def kernel_pred(x_data, prediction_grid):
63 |     A = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
64 |     B = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
65 |     square_distance = tf.add(tf.subtract(A, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))),
66 |                              tf.transpose(B))
67 |     return tf.exp(tf.multiply(gamma, tf.abs(square_distance)))
68 |
69 |
70 | def kernel_fn(x_data, gamma):
71 |     """
72 |     This function generates the RBF kernel.
73 |     :param x_data: Input data.
74 |     :param gamma: The kernel hyperparameter.
75 |     :return: The RBF kernel. (Note: only the cross term of the squared distance is used here; kernel_pred above uses the full expansion.)
76 |     """
77 |     square_distance = tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))
78 |     kernel = tf.exp(tf.multiply(gamma, tf.abs(square_distance)))
79 |     return kernel
80 |
81 |
82 | def prepare_label_fn(label_onehot):
83 |     """
84 |     Label preparation. Since we are dealing with a one-vs-all scenario, for each sample
85 |     all the labels other than the current class must be set to -1. This is done by
86 |     simply setting all the zero values to -1 in the returned one_hot label array.
87 |
88 |     :param label_onehot: The input one_hot label array, of shape (num_samples, num_classes).
89 |     :return: The output with the same shape and all zeros turned to -1.
90 |     """
91 |     labels = label_onehot
92 |     labels[labels == 0] = -1
93 |     labels = np.transpose(labels)
94 |     return labels
95 |
96 |
97 | def next_batch(X, y, batch_size):
98 |     """
99 |     Generate a random batch of data.
100 |     :param X: The input features; y: the prepared labels.
101 |     :param batch_size: The number of samples per batch.
102 |     :return: A batch of features and labels.
103 |     """
104 |     idx = np.random.choice(len(X), size=batch_size)
105 |     X_batch = X[idx]
106 |     y_batch = y[:, idx]
107 |     return X_batch, y_batch
108 |
109 |
110 | ########################
111 | ### Data Preparation ###
112 | ########################
113 |
114 | # Read MNIST data. It has the following structure:
115 | # mnist.train.images, mnist.train.labels: The training set images and their associated labels.
116 | # mnist.validation.images, mnist.validation.labels: The validation set images and their associated labels.
117 | # mnist.test.images, mnist.test.labels: The test set images and their associated labels.
118 |
119 | # Flags:
120 | # "reshape=True": the data will be reshaped to (num_samples, num_features),
121 | #   and since each image is 28x28, num_features = 784.
122 | # "one_hot=True": this flag returns the one_hot labeling format.
123 | #   ex: sample_label [1 0 0 0 0 0 0 0 0 0] says the sample belongs to the first class.
124 | mnist = input_data.read_data_sets("MNIST_data/", reshape=True, one_hot=True)
125 |
126 | # Label preparation.
127 | y_train = prepare_label_fn(mnist.train.labels)
128 | y_test = prepare_label_fn(mnist.test.labels)
129 |
130 | # Get the number of classes.
131 | num_classes = y_train.shape[0]
132 |
133 | ##########################################
134 | ### Dimensionality Reduction Using PCA ###
135 | ##########################################
136 | pca = PCA(n_components=100)
137 | pca.fit(mnist.train.images)
138 |
139 | # Print the cumulative explained variance of the chosen principal components.
140 | print("The variance of the chosen components = %{0:.2f}".format(100 * np.sum(pca.explained_variance_ratio_)))
141 | x_train = pca.transform(mnist.train.images)
142 | x_test = pca.transform(mnist.test.images)
143 | num_features = x_train.shape[1]
144 |
145 | ############################
146 | ### Graph & Optimization ###
147 | ############################
148 | # Create graph
149 | sess = tf.Session()
150 |
151 | # Initialize placeholders
152 | data_placeholder = tf.placeholder(shape=[None, num_features], dtype=tf.float32)
153 | label_placeholder = tf.placeholder(shape=[num_classes, None], dtype=tf.float32)
154 | pred_placeholder = tf.placeholder(shape=[None, num_features], dtype=tf.float32)
155 |
156 | # The alpha variable for solving the dual optimization problem.
157 | alpha = tf.Variable(tf.random_normal(shape=[num_classes, FLAGS.batch_size]))
158 |
159 | # Gaussian (RBF) kernel
160 | gamma = tf.constant(FLAGS.gamma)
161 |
162 | # RBF kernel
163 | my_kernel = kernel_fn(data_placeholder, gamma)
164 |
165 | # Loss calculation.
166 | loss = loss_fn(alpha, label_placeholder)
167 |
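# Note on the objective: the quantity minimized above is (up to constant
# factors) the negative of the kernel SVM dual objective,
#     sum_i(alpha_i) - sum_ij alpha_i * alpha_j * y_i * y_j * K(x_i, x_j),
# evaluated independently for each one-vs-all class, so gradient descent on
# it approximately solves the SVM in its dual form.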
168 | # Generating the prediction kernel.
169 | pred_kernel = kernel_pred(data_placeholder, pred_placeholder)
170 |
171 | #############################
172 | ### Prediction & Accuracy ###
173 | #############################
174 | prediction_output = tf.matmul(tf.multiply(label_placeholder, alpha), pred_kernel)
175 | prediction = tf.argmax(prediction_output - tf.expand_dims(tf.reduce_mean(prediction_output, 1), 1), 0)
176 | accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, tf.argmax(label_placeholder, 0)), tf.float32))
177 |
178 | # Optimizer
179 | train_op = tf.train.AdamOptimizer(FLAGS.initial_learning_rate).minimize(loss)
180 |
181 | # Variables Initialization.
182 | init = tf.global_variables_initializer()
183 | sess.run(init)
184 |
185 | # Training loop
186 | for i in range(FLAGS.num_steps):
187 |
188 |     batch_X, batch_y = next_batch(x_train, y_train, FLAGS.batch_size)
189 |     sess.run(train_op, feed_dict={data_placeholder: batch_X, label_placeholder: batch_y})
190 |
191 |     temp_loss = sess.run(loss, feed_dict={data_placeholder: batch_X, label_placeholder: batch_y})
192 |
193 |     acc_train_batch = sess.run(accuracy, feed_dict={data_placeholder: batch_X,
194 |                                                     label_placeholder: batch_y,
195 |                                                     pred_placeholder: batch_X})
196 |
197 |     batch_X_test, batch_y_test = next_batch(x_test, y_test, FLAGS.batch_size)
198 |     acc_test_batch = sess.run(accuracy, feed_dict={data_placeholder: batch_X_test,
199 |                                                    label_placeholder: batch_y_test,
200 |                                                    pred_placeholder: batch_X_test})
201 |
202 |     if (i + 1) % FLAGS.log_steps == 0:
203 |         print('Step #%d, Loss= %f, training accuracy= %f, testing accuracy= %f ' % (
204 |             (i+1), temp_loss, acc_train_batch, acc_test_batch))
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/3-neural_networks/convolutional-neural-network/code/__init__.py -------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/auxiliary/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/3-neural_networks/convolutional-neural-network/code/auxiliary/__init__.py -------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/auxiliary/progress_bar.py: --------------------------------------------------------------------------------
1 | import sys
2 |
3 |
4 | def print_progress(progress, epoch_num, loss):
5 |     """
6 |     This function draws an active progress bar.
7 |     :param progress: Where we are:
8 |                      type: float
9 |                      value: [0,1]
10 |     :param epoch_num: number of epochs for training
11 |     :param loss: The loss for the specific batch in the training phase.
12 |
13 |     :return: Progress bar
14 |     """
15 |
16 |     # Define the length of the bar.
17 |     barLength = 30
18 |
19 |     # Check the input!
20 |     assert type(progress) is float, "progress is not a float: %r" % progress
21 |     assert 0 <= progress <= 1, "variable should be between zero and one!"
22 |
23 |     # Empty status while processing.
24 |     status = ""
25 |
26 |     # This part is to make a new line when the process is finished.
27 |     if progress >= 1:
28 |         progress = 1
29 |         status = "\r\n"
30 |
31 |     # Where we are in the progress!
32 |     indicator = int(round(barLength*progress))
33 |
34 |     # Print the appropriate progress phase!
35 |     fields = [str(epoch_num), "#"*indicator, "-"*(barLength-indicator), progress*100, loss, status]
36 |     text = "\rEpoch {0[0]} {0[1]} {0[2]} %{0[3]:.2f} loss={0[4]:.3f} {0[5]}".format(fields)
37 |     sys.stdout.write(text)
38 |     sys.stdout.flush()
39 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/evaluation.sh: --------------------------------------------------------------------------------
1 |
2 | # Run evaluation.
3 | python test_classifier.py \
4 |     --batch_size=512 \
5 |     --allow_soft_placement
6 |
7 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/input_function/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/3-neural_networks/convolutional-neural-network/code/input_function/__init__.py -------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/input_function/input.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | import collections
3 |
4 |
5 | class DATA_OBJECT(object):
6 |     def __init__(self,
7 |                  images,
8 |                  labels,
9 |                  num_classes=0,
10 |                  one_hot=False,
11 |                  dtype=np.float32,
12 |                  reshape=False):
13 |         """Data object construction.
14 |         images: The images of size [num_samples, rows, columns, depth].
15 |         labels: The labels of size [num_samples,]
16 |         num_classes: The number of classes in case one_hot labeling is desired.
17 |         one_hot=False: Turn the labels into one_hot format.
18 |         dtype=np.float32: The data type.
19 |         reshape=False: Reshape in case feature-vector extraction is desired.
20 |
21 |         """
22 |         # Define the data type.
23 |         if dtype not in (np.uint8, np.float32):
24 |             raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
25 |                             dtype)
26 |         assert images.shape[0] == labels.shape[0], (
27 |             'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
28 |         self._num_samples = images.shape[0]
29 |
30 |         # [num_examples, rows, columns, depth] -> [num_examples, rows*columns]
31 |         if reshape:
32 |             assert images.shape[3] == 1
33 |             images = images.reshape(images.shape[0],
34 |                                     images.shape[1] * images.shape[2])
35 |
36 |         # Convert to float if necessary.
37 |         if dtype == np.float32:
38 |             # Convert from [0, 255] -> [0.0, 1.0].
39 |             images = images.astype(dtype)
40 |             images = np.multiply(images, 1.0 / 255.0)
41 |         self._images = images
42 |         self._labels = labels
43 |
44 |         # If the one_hot flag is true, then the one_hot labeling supersedes the normal labeling.
45 |         if one_hot:
46 |             # If one_hot labeling is desired, the number of classes must be defined as one of the arguments of the DATA_OBJECT class!
47 |             assert num_classes != 0, (
48 |                 'You must specify the num_classes in the DATA_OBJECT for one_hot label construction!')
49 |
50 |             # Define the indexes.
51 |             index = np.arange(self._num_samples) * num_classes
52 |             one_hot_labels = np.zeros((self._num_samples, num_classes))
53 |             one_hot_labels.flat[index + labels.ravel()] = 1
54 |             self._labels = one_hot_labels
55 |
56 |     @property
57 |     def images(self):
58 |         return self._images
59 |
60 |     @property
61 |     def labels(self):
62 |         return self._labels
63 |
64 |     @property
65 |     def num_samples(self):
66 |         return self._num_samples
67 |
68 |
69 | def provide_data(mnist):
70 |     """
71 |     This function provides a data object with the desired shape.
72 |     The attributes of the data object:
73 |         - train
74 |         - validation
75 |         - test
76 |     The sub-attributes of each of those attributes:
77 |         - images
78 |         - labels
79 |
80 |     :param mnist: The downloaded MNIST dataset.
81 |     :return: data: The data object.
82 |              ex: data.train.images returns the images of the training set of the dataset object!
83 |
84 |
85 |     """
86 |     ################################################
87 |     ########## Get the images and labels ###########
88 |     ################################################
89 |
90 |     # Note: This setup is specific to MNIST data but can be generalized for any data.
91 |     # The ?_images (? can be train, validation or test) must have the format [num_samples, rows, columns, depth] after extraction from data.
92 |     # The ?_labels (? can be train, validation or test) must have the format [num_samples,] after extraction from data.
93 |     train_images = mnist.train.images
94 |     train_labels = mnist.train.labels
95 |     validation_images = mnist.validation.images
96 |     validation_labels = mnist.validation.labels
97 |     test_images = mnist.test.images
98 |     test_labels = mnist.test.labels
99 |
100 |     # Create separate objects for train, validation & test.
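    # Note: with one_hot=True each integer label k becomes a 10-dimensional
    # indicator vector with a single 1 at index k, which is the format a
    # softmax/cross-entropy classifier consumes.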
101 |     train = DATA_OBJECT(train_images, train_labels, num_classes=10, one_hot=True, dtype=np.float32, reshape=False)
102 |     validation = DATA_OBJECT(validation_images, validation_labels, num_classes=10, one_hot=True, dtype=np.float32,
103 |                              reshape=False)
104 |     test = DATA_OBJECT(test_images, test_labels, num_classes=10, one_hot=True, dtype=np.float32, reshape=False)
105 |
106 |     # Create the whole data object.
107 |     DataSetObject = collections.namedtuple('DataSetObject', ['train', 'validation', 'test'])
108 |     data = DataSetObject(train=train, validation=validation, test=test)
109 |
110 |     return data
111 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/net_structure/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/3-neural_networks/convolutional-neural-network/code/net_structure/__init__.py -------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/net_structure/net.py: --------------------------------------------------------------------------------
1 | #####################################
2 | # With some tiny modifications, this code is the one used by TensorFlow Slim at:
3 | # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim
4 | # Please refer to the link for further explanations.
5 |
6 | ### The difference is that this architecture is written in a fully-convolutional fashion.
7 | ### The advantage is that this model can be used for larger image sizes with some average pooling in the last layer.
8 |
9 | import tensorflow as tf
10 |
11 | slim = tf.contrib.slim
12 |
13 |
14 | def net_architecture(images, num_classes=10, is_training=False,
15 |                      dropout_keep_prob=0.5,
16 |                      spatial_squeeze=True,
17 |                      scope='Net'):
18 |     """Creates a variant of the Net model.
19 |
20 |     Args:
21 |         images: The batch of `Tensors`: size [batch_size, height, width, channels].
22 |         num_classes: Total number of classes.
23 |         is_training: Training/Validation.
24 |         dropout_keep_prob: The fraction of activations that is kept: only active in training mode!
25 |         scope: Variable_scope.
26 |
27 |     Returns:
28 |         logits: the pre-softmax activations of size [batch_size, `num_classes`]
29 |         end_points: The dictionary for the layers' outputs.
30 |     """
31 |
32 |     # Create an empty dictionary.
33 |     end_points = {}
34 |
35 |     with tf.variable_scope(scope, 'Net', [images, num_classes]) as sc:
36 |         end_points_collection = sc.name + '_end_points'
37 |
38 |         # Collect outputs for conv2d and max_pool2d.
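        # Note (assuming the 28x28 MNIST inputs used in this tutorial): the two
        # stride-2 poolings below shrink the feature map to 7x7, so the 7x7
        # VALID convolution 'fc3' covers the whole map at once and acts as a
        # fully connected layer; this is what makes the network fully
        # convolutional.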
39 |         with tf.contrib.framework.arg_scope([tf.contrib.layers.conv2d, tf.contrib.layers.max_pool2d],
40 |                                             outputs_collections=end_points_collection):
41 |             # Layer-1
42 |             net = tf.contrib.layers.conv2d(images, 32, [5, 5], scope='conv1')
43 |             net = tf.contrib.layers.max_pool2d(net, [2, 2], 2, scope='pool1')
44 |
45 |             # Layer-2
46 |             net = tf.contrib.layers.conv2d(net, 64, [5, 5], scope='conv2')
47 |             net = tf.contrib.layers.max_pool2d(net, [2, 2], 2, scope='pool2')
48 |
49 |             # Layer-3
50 |             net = tf.contrib.layers.conv2d(net, 1024, [7, 7], padding='VALID', scope='fc3')
51 |             net = tf.contrib.layers.dropout(net, dropout_keep_prob, is_training=is_training,
52 |                                             scope='dropout3')
53 |
54 |             # Last layer, which produces the logits for the classes.
55 |             logits = tf.contrib.layers.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='fc4')
56 |
57 |         # Return the collections as a dictionary.
58 |         end_points = slim.utils.convert_collection_to_dict(end_points_collection)
59 |
60 |         # Squeeze spatially to eliminate the extra dimensions (embedding layer).
61 |         if spatial_squeeze:
62 |             logits = tf.squeeze(logits, [1, 2], name='fc4/squeezed')
63 |             end_points[sc.name + '/fc4'] = logits
64 |         return logits, end_points
65 |
66 |
67 | def net_arg_scope(weight_decay=0.0005, is_training=False):
68 |     """Defines the default network argument scope.
69 |
70 |     Args:
71 |         weight_decay: The weight decay to use for regularizing the model.
72 |
73 |     Returns:
74 |         An `arg_scope` to use for the model.
75 |     """
76 |     if is_training:
77 |         with tf.contrib.framework.arg_scope(
78 |                 [tf.contrib.layers.conv2d],
79 |                 padding='SAME',
80 |                 weights_regularizer=slim.l2_regularizer(weight_decay),
81 |                 weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG',
82 |                                                                                    uniform=False, seed=None,
83 |                                                                                    dtype=tf.float32),
84 |                 activation_fn=tf.nn.relu) as sc:
85 |             return sc
86 |
87 |     else:
88 |         with tf.contrib.framework.arg_scope(
89 |                 [tf.contrib.layers.conv2d],
90 |                 padding='SAME',
91 |                 activation_fn=tf.nn.relu) as sc:
92 |             return sc
93 |
94 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/script/evaluation.sh: --------------------------------------------------------------------------------
1 | # Where the logs will be saved to.
2 | test_dir=/home/sina/GITHUB/Tensorflow-Turorials/NeuralNetworks/convolutional-neural-network/code/test_logs
3 |
4 | # Where the checkpoints are saved to.
5 | checkpoint_dir=/home/sina/GITHUB/Tensorflow-Turorials/NeuralNetworks/convolutional-neural-network/code/checkpoints
6 |
7 |
8 | # Run evaluation.
9 | python test_classifier.py \
10 |     --test_dir=${test_dir} \
11 |     --checkpoint_dir=${checkpoint_dir} \
12 |     --batch_size=512 \
13 |     --num_epochs=2 \
14 |     --allow_soft_placement
15 |
16 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/script/train.sh: --------------------------------------------------------------------------------
1 | # Where the logs will be saved to.
2 | train_dir=/home/sina/GITHUB/Tensorflow-Turorials/NeuralNetworks/convolutional-neural-network/code/train_logs
3 |
4 | # Where the checkpoints are saved to.
5 | checkpoint_dir=/home/sina/GITHUB/Tensorflow-Turorials/NeuralNetworks/convolutional-neural-network/code/checkpoints
6 |
7 |
8 | # Run training.
9 | python train_classifier.py \
10 |     --train_dir=${train_dir} \
11 |     --checkpoint_dir=${checkpoint_dir} \
12 |     --batch_size=512 \
13 |     --num_epochs=20 \
14 |     --max_num_checkpoint=10 \
15 |     --is_training \
16 |     --allow_soft_placement
17 |
18 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/train.sh: --------------------------------------------------------------------------------
1 |
2 | # Run training.
3 | python train_classifier.py \
4 |     --batch_size=512 \
5 |     --max_num_checkpoint=10 \
6 |     --num_classes=10 \
7 |     --num_epochs=1 \
8 |     --initial_learning_rate=0.001 \
9 |     --num_epochs_per_decay=1 \
10 |     --is_training=True \
11 |     --allow_soft_placement=True \
12 |     --fine_tuning=False \
13 |     --online_test=True \
14 |     --log_device_placement=False
15 |
16 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/code/train_evaluation.py: --------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import tensorflow as tf
3 | import numpy as np
4 | from auxiliary import progress_bar
5 | import os
6 | import sys
7 |
8 |
9 | def train(**keywords):
10 |     """
11 |     This function runs the session in either training or evaluation mode.
12 |     NOTE: **keywords is used in order to make the code easily changeable.
13 |     WARNING: All of the arguments in **keywords must be defined when calling this function.
14 |     **keywords:
15 |     :param sess: The default session.
16 |     :param saver: The saver operator to save and load the model weights.
17 |     :param tensors: The tensors dictionary defined by the graph.
18 |     :param data: The data structure.
19 |     :param train_dir: The training dir which is a reference for saving the logs and model checkpoints.
20 |     :param finetuning: If fine-tuning should be done or random initialization is needed.
21 |     :param num_epochs: Number of epochs for training.
22 |     :param online_test: If the testing is done while training.
23 |     :param checkpoint_dir: The directory of the checkpoints.
24 |     :param batch_size: The training batch size.
25 |
26 |     :return:
27 |              Run the session.
28 |     """
29 |
30 |     # The prefix for checkpoint files.
31 |     checkpoint_prefix = 'model'
32 |
33 |     ###################################################################
34 |     ########## Defining the summary writers for train/test ###########
35 |     ###################################################################
36 |
37 |     train_summary_dir = os.path.join(keywords['train_dir'], "summaries", "train")
38 |     train_summary_writer = tf.summary.FileWriter(train_summary_dir)
39 |     train_summary_writer.add_graph(keywords['sess'].graph)
40 |
41 |     test_summary_dir = os.path.join(keywords['train_dir'], "summaries", "test")
42 |     test_summary_writer = tf.summary.FileWriter(test_summary_dir)
43 |     test_summary_writer.add_graph(keywords['sess'].graph)
44 |
45 |     # If the fine-tuning flag is 'True', the model will be restored.
46 | if keywords['finetuning']: 47 | keywords['saver'].restore(keywords['sess'], os.path.join(keywords['checkpoint_dir'], checkpoint_prefix)) 48 | print("Model restored for fine-tuning...") 49 | 50 | ################################################################### 51 | ########## Run the training and loop over the batches ############# 52 | ################################################################### 53 | for epoch in range(keywords['num_epochs']): 54 | total_batch_training = int(keywords['data'].train.images.shape[0] / keywords['batch_size']) 55 | 56 | # go through the batches 57 | for batch_num in range(total_batch_training): 58 | ################################################# 59 | ########## Get the training batches ############# 60 | ################################################# 61 | 62 | start_idx = batch_num * keywords['batch_size'] 63 | end_idx = (batch_num + 1) * keywords['batch_size'] 64 | 65 | # Fit training using batch data 66 | train_batch_data, train_batch_label = keywords['data'].train.images[start_idx:end_idx], keywords[ 67 | 'data'].train.labels[ 68 | start_idx:end_idx] 69 | 70 | ######################################## 71 | ########## Run the session ############# 72 | ######################################## 73 | 74 | # Run optimization op (backprop) and Calculate batch loss and accuracy 75 | # When the tensor tensors['global_step'] is evaluated, it will be incremented by one. 76 | batch_loss, _, train_summaries, training_step = keywords['sess'].run( 77 | [keywords['tensors']['cost'], keywords['tensors']['train_op'], keywords['tensors']['summary_train_op'], 78 | keywords['tensors']['global_step']], 79 | feed_dict={keywords['tensors']['image_place']: train_batch_data, 80 | keywords['tensors']['label_place']: train_batch_label, 81 | keywords['tensors']['dropout_param']: 0.5}) 82 | 83 | ######################################## 84 | ########## Write summaries ############# 85 | ######################################## 86 | 87 | # Write the summaries 88 | train_summary_writer.add_summary(train_summaries, global_step=training_step) 89 | 90 | # # Write the specific summaries for training phase. 91 | # train_summary_writer.add_summary(train_image_summary, global_step=training_step) 92 | 93 | ################################################# 94 | ########## Plot the progressive bar ############# 95 | ################################################# 96 | 97 | progress = float(batch_num + 1) / total_batch_training 98 | progress_bar.print_progress(progress, epoch_num=epoch + 1, loss=batch_loss) 99 | 100 | # ################################################################ 101 | # ############ Summaries per epoch of training ################### 102 | # ################################################################ 103 | summary_epoch_train_op = keywords['tensors']['summary_epoch_train_op'] 104 | train_epoch_summaries = keywords['sess'].run(summary_epoch_train_op, 105 | feed_dict={keywords['tensors']['image_place']: train_batch_data, 106 | keywords['tensors']['label_place']: train_batch_label, 107 | keywords['tensors']['dropout_param']: 1.0}) 108 | 109 | # Put the summaries to the train summary writer. 
110 |         train_summary_writer.add_summary(train_epoch_summaries, global_step=training_step)
111 |
112 |         #####################################################
113 |         ########## Evaluation on the test data ##############
114 |         #####################################################
115 |
116 |         if keywords['online_test']:
117 |             # WARNING: In this evaluation the whole test data is fed. In case the test data is huge, this implementation
118 |             #          may lead to a memory error. In the presence of large test sets, batch evaluation on the test data is
119 |             #          recommended, as in the training phase.
120 |             test_accuracy_epoch, test_summaries = keywords['sess'].run(
121 |                 [keywords['tensors']['accuracy'], keywords['tensors']['summary_test_op']],
122 |                 feed_dict={keywords['tensors']['image_place']: keywords['data'].test.images,
123 |                            keywords['tensors'][
124 |                                'label_place']: keywords['data'].test.labels,
125 |                            keywords['tensors'][
126 |                                'dropout_param']: 1.})
127 |             print("Epoch " + str(epoch + 1) + ", Testing Accuracy= " + \
128 |                   "{:.5f}".format(test_accuracy_epoch))
129 |
130 |             ###########################################################
131 |             ########## Write the summaries for test phase #############
132 |             ###########################################################
133 |
134 |             # Returning the value of global_step if necessary
135 |             current_step = tf.train.global_step(keywords['sess'], keywords['tensors']['global_step'])
136 |
137 |             # Add the counter of the global step for proper scaling between train and test summaries.
138 |             test_summary_writer.add_summary(test_summaries, global_step=current_step)
139 |
140 |     ###########################################################
141 |     ############ Saving the model checkpoint ##################
142 |     ###########################################################
143 |
144 |     # # The model will be saved when the training is done.
145 |
146 |     # Create the path for saving the checkpoints.
147 |     if not os.path.exists(keywords['checkpoint_dir']):
148 |         os.makedirs(keywords['checkpoint_dir'])
149 |
150 |     # Save the model.
151 |     save_path = keywords['saver'].save(keywords['sess'], os.path.join(keywords['checkpoint_dir'], checkpoint_prefix))
152 |     print("Model saved in file: %s" % save_path)
153 |
154 |
155 | #############################################################################
156 | ########## Run the session for pure evaluation on the test data #############
157 | #############################################################################
158 |
159 |
160 | def evaluation(**keywords):
161 |     # The prefix for checkpoint files.
162 |     checkpoint_prefix = 'model'
163 |
164 |     # Get the input arguments.
165 |     saver = keywords['saver']
166 |     sess = keywords['sess']
167 |     checkpoint_dir = keywords['checkpoint_dir']
168 |     data = keywords['data']
169 |     accuracy_tensor = keywords['tensors']['accuracy']
170 |     image_place = keywords['tensors']['image_place']
171 |     label_place = keywords['tensors']['label_place']
172 |     dropout_param = keywords['tensors']['dropout_param']
173 |
174 |
175 |     # Restoring the saved weights.
176 |     saver.restore(sess, os.path.join(checkpoint_dir, checkpoint_prefix))
177 |     print("Model restored...")
178 |
179 |     test_set = data.test.images
180 |     test_label = data.test.labels
181 |     # Evaluation of the model.
182 |     test_accuracy = 100 * keywords['sess'].run(accuracy_tensor, feed_dict={
183 |         image_place: test_set,
184 |         label_place: test_label,
185 |         dropout_param: 1.})
186 |
187 |     print("Final Test Accuracy is %% %.2f" % test_accuracy)
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/readme.rst: --------------------------------------------------------------------------------
1 | ==============================
2 | Convolutional Neural Network
3 | ==============================
4 |
5 | This is the code repository for the blog post `Train a Convolutional Neural Network as a Classifier`__. Please refer to this `wiki page`__ for more details.
6 |
7 | .. _cnn_classifier_post: http://machinelearninguru.com/deep_learning/tensorflow/neural_networks/cnn_classifier/cnn_classifier.html
8 | __ cnn_classifier_post_
9 |
10 | .. _cnn_classifier_wiki: https://github.com/astorfi/TensorFlow-World/wiki/Convolutional-Neural-Networks
11 | __ cnn_classifier_wiki_
12 |
13 |
14 |
15 |
16 |
17 |
18 | --------
19 | Training
20 | --------
21 |
22 | **Train:**
23 |
24 | The training can be run with the **train.sh** `bash script` using the following command:
25 |
26 | .. code-block:: bash
27 |
28 |     ./train.sh
29 |
30 | The bash script is as below:
31 |
32 |
33 | .. code-block:: bash
34 |
35 |     python train_classifier.py \
36 |       --batch_size=512 \
37 |       --max_num_checkpoint=10 \
38 |       --num_classes=10 \
39 |       --num_epochs=1 \
40 |       --initial_learning_rate=0.001 \
41 |       --num_epochs_per_decay=1 \
42 |       --is_training=True \
43 |       --allow_soft_placement=True \
44 |       --fine_tuning=False \
45 |       --online_test=True \
46 |       --log_device_placement=False
47 |
48 | **Helper:**
49 |
50 | To see the full list of input parameters, running the following command is recommended:
51 |
52 | .. code-block:: bash
53 |
54 |     python train_classifier.py --help
55 |
56 |
57 | Here `train_classifier.py` is the main file for running the training. The result of the above command is as below:
58 |
59 | .. code-block:: bash
60 |
61 |     --train_dir TRAIN_DIR
62 |         Directory where event logs are written to.
63 |     --checkpoint_dir CHECKPOINT_DIR
64 |         Directory where checkpoints are written to.
65 |     --max_num_checkpoint MAX_NUM_CHECKPOINT
66 |         Maximum number of checkpoints that TensorFlow will
67 |         keep.
68 |     --num_classes NUM_CLASSES
69 |         Number of classes.
70 |     --batch_size BATCH_SIZE
71 |         Number of samples per batch.
72 |     --num_epochs NUM_EPOCHS
73 |         Number of epochs for training.
74 |     --initial_learning_rate INITIAL_LEARNING_RATE
75 |         Initial learning rate.
76 |     --learning_rate_decay_factor LEARNING_RATE_DECAY_FACTOR
77 |         Learning rate decay factor.
78 |     --num_epochs_per_decay NUM_EPOCHS_PER_DECAY
79 |         Number of epoch passes to decay learning rate.
80 |     --is_training [IS_TRAINING]
81 |         Training/Testing.
82 |     --fine_tuning [FINE_TUNING]
83 |         Fine tuning is desired or not?.
84 |     --online_test [ONLINE_TEST]
85 |         Online testing is desired or not?.
86 |     --allow_soft_placement [ALLOW_SOFT_PLACEMENT]
87 |         Automatically put the variables on CPU if there is no
88 |         GPU support.
89 |     --log_device_placement [LOG_DEVICE_PLACEMENT]
90 |         Demonstrate which variables are on what device.
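Any of the flags listed above can be overridden on the command line. As an illustrative invocation (hypothetical values, not from the original document):

.. code-block:: bash

    python train_classifier.py --num_epochs=5 --batch_size=256 --initial_learning_rate=0.0005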
91 |
92 |
93 | -----------
94 | Evaluation
95 | -----------
96 |
97 | The evaluation can be run with the **evaluation.sh** `bash script` using the following command:
98 |
99 | .. code-block:: bash
100 |
101 |     ./evaluation.sh
102 |
103 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/convolutional-neural-network/requirements.txt: --------------------------------------------------------------------------------
1 | - TensorFlow 1.0
2 | - numpy
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/multi-layer-perceptron/code/evaluation.sh: --------------------------------------------------------------------------------
1 |
2 | # Run evaluation.
3 | python test_classifier.py \
4 |     --batch_size=512 \
5 |     --allow_soft_placement
6 |
7 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/multi-layer-perceptron/code/train.sh: --------------------------------------------------------------------------------
1 |
2 | # Run training.
3 | python train_mlp.py \
4 |     --batch_size=512 \
5 |     --max_num_checkpoint=10 \
6 |     --num_classes=10 \
7 |     --num_epochs=1 \
8 |     --initial_learning_rate=0.001 \
9 |     --num_epochs_per_decay=1 \
10 |     --is_training=True \
11 |     --allow_soft_placement=True \
12 |     --fine_tuning=False \
13 |     --online_test=True \
14 |     --log_device_placement=False
15 |
16 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/multi-layer-perceptron/readme.rst: --------------------------------------------------------------------------------
1 | =========================
2 | Multi Layer Perceptron
3 | =========================
4 |
5 | This code is developed for training a ``Multi Layer Perceptron`` architecture, in which the input is fed forward through a network that contains several hidden layers.
6 |
7 | .. figure:: https://github.com/astorfi/TensorFlow-World/blob/master/docs/_img/3-neural_network/multi-layer-perceptron/neural-network.png
8 |    :scale: 50 %
9 |    :align: center
10 |
11 |
12 | --------
13 | Training
14 | --------
15 |
16 | **Train:**
17 |
18 | The training can be run with the **train.sh** `bash script` using the following command:
19 |
20 | .. code-block:: bash
21 |
22 |     ./train.sh
23 |
24 | The bash script is as below:
25 |
26 |
27 | .. code-block:: bash
28 |
29 |     python train_mlp.py \
30 |       --batch_size=512 \
31 |       --max_num_checkpoint=10 \
32 |       --num_classes=10 \
33 |       --num_epochs=1 \
34 |       --initial_learning_rate=0.001 \
35 |       --num_epochs_per_decay=1 \
36 |       --is_training=True \
37 |       --allow_soft_placement=True \
38 |       --fine_tuning=False \
39 |       --online_test=True \
40 |       --log_device_placement=False
41 |
42 | **Helper:**
43 |
44 | To see the full list of input parameters, running the following command is recommended:
45 |
46 | .. code-block:: bash
47 |
48 |     python train_mlp.py --help
49 |
50 |
51 | Here `train_mlp.py` is the main file for running the training. The result of the above command is as below:
52 |
53 | .. code-block:: bash
54 |
55 |     --train_dir TRAIN_DIR
56 |         Directory where event logs are written to.
57 |     --checkpoint_dir CHECKPOINT_DIR
58 |         Directory where checkpoints are written to.
59 |     --max_num_checkpoint MAX_NUM_CHECKPOINT
60 |         Maximum number of checkpoints that TensorFlow will
61 |         keep.
62 |     --num_classes NUM_CLASSES
63 |         Number of classes.
64 |     --batch_size BATCH_SIZE
65 |         Number of samples per batch.
66 |     --num_epochs NUM_EPOCHS
67 |         Number of epochs for training.
68 |     --initial_learning_rate INITIAL_LEARNING_RATE
69 |         Initial learning rate.
70 |     --learning_rate_decay_factor LEARNING_RATE_DECAY_FACTOR
71 |         Learning rate decay factor.
72 |     --num_epochs_per_decay NUM_EPOCHS_PER_DECAY
73 |         Number of epoch passes to decay learning rate.
74 |     --is_training [IS_TRAINING]
75 |         Training/Testing.
76 |     --fine_tuning [FINE_TUNING]
77 |         Fine tuning is desired or not?.
78 |     --online_test [ONLINE_TEST]
79 |         Online testing is desired or not?.
80 |     --allow_soft_placement [ALLOW_SOFT_PLACEMENT]
81 |         Automatically put the variables on CPU if there is no
82 |         GPU support.
83 |     --log_device_placement [LOG_DEVICE_PLACEMENT]
84 |         Demonstrate which variables are on what device.
85 |
86 |
87 | -----------
88 | Evaluation
89 | -----------
90 |
91 | The evaluation can be run with the **evaluation.sh** `bash script` using the following command:
92 |
93 | .. code-block:: bash
94 |
95 |     ./evaluation.sh
96 |
97 |
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/multi-layer-perceptron/requirements.txt: --------------------------------------------------------------------------------
1 | - TensorFlow 1.0
2 | - numpy
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/readme.md: --------------------------------------------------------------------------------
1 | Tutorials on neural network architectures in TensorFlow.
-------------------------------------------------------------------------------- /codes/python/3-neural_networks/recurrent-neural-networks/README.rst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/3-neural_networks/recurrent-neural-networks/README.rst -------------------------------------------------------------------------------- /codes/python/3-neural_networks/recurrent-neural-networks/code/MNIST_data/t10k-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/3-neural_networks/recurrent-neural-networks/code/MNIST_data/t10k-images-idx3-ubyte.gz -------------------------------------------------------------------------------- /codes/python/3-neural_networks/recurrent-neural-networks/code/MNIST_data/t10k-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/3-neural_networks/recurrent-neural-networks/code/MNIST_data/t10k-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /codes/python/3-neural_networks/recurrent-neural-networks/code/MNIST_data/train-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/3-neural_networks/recurrent-neural-networks/code/MNIST_data/train-images-idx3-ubyte.gz --------------------------------------------------------------------------------
/codes/python/3-neural_networks/recurrent-neural-networks/code/MNIST_data/train-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/codes/python/3-neural_networks/recurrent-neural-networks/code/MNIST_data/train-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /codes/python/3-neural_networks/recurrent-neural-networks/code/rnn.py: --------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import argparse
5 |
6 | # Useful function for arguments.
7 | def str2bool(v):
8 |     return v.lower() in ("yes", "true")
9 |
10 | # Parser
11 | parser = argparse.ArgumentParser(description='Creating Classifier')
12 |
13 | ######################
14 | # Optimization Flags #
15 | ######################
16 |
17 | tf.app.flags.DEFINE_float('learning_rate', default=0.001, help='initial learning rate')
18 | tf.app.flags.DEFINE_integer('seed', default=111, help='seed')
19 |
20 | ##################
21 | # Training Flags #
22 | ##################
23 | tf.app.flags.DEFINE_integer('batch_size', default=128, help='Batch size for training')
24 | tf.app.flags.DEFINE_integer('num_epoch', default=10, help='Number of training iterations')
25 | tf.app.flags.DEFINE_integer('batch_per_log', default=10, help='Print the log at what number of batches?')
26 |
27 | ###############
28 | # Model Flags #
29 | ###############
30 | tf.app.flags.DEFINE_integer('hidden_size', default=128, help='Number of neurons for the RNN hidden layer')
31 |
32 | # Store all elements in the FLAGS structure!
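# Note: the keyword-style DEFINE_* calls above (default=..., help=...) rely on
# tf.app.flags being backed by absl.flags, which is the case in TensorFlow 1.5
# and later; older releases expect positional arguments.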
33 | args = tf.app.flags.FLAGS
34 |
35 |
36 | # Reset the graph and set the random numbers to be the same using "seed".
37 | tf.reset_default_graph()
38 | tf.set_random_seed(args.seed)
39 | np.random.seed(args.seed)
40 |
41 | # Divide the 28x28 images into rows of data to feed to the RNN as sequential information.
42 | step_size = 28
43 | input_size = 28
44 | output_size = 10
45 |
46 | # Input tensors
47 | X = tf.placeholder(tf.float32, [None, step_size, input_size])
48 | y = tf.placeholder(tf.int32, [None])
49 |
50 | # RNN
51 | cell = tf.nn.rnn_cell.BasicRNNCell(num_units=args.hidden_size)
52 | output, state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
53 |
54 | # Forward pass and loss calculation
55 | logits = tf.layers.dense(state, output_size)
56 | cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
57 | loss = tf.reduce_mean(cross_entropy)
58 |
59 | # Optimizer
60 | optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate).minimize(loss)
61 |
62 | # Prediction
63 | prediction = tf.nn.in_top_k(logits, y, 1)
64 | accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
65 |
66 | # Input data
67 | from tensorflow.examples.tutorials.mnist import input_data
68 | mnist = input_data.read_data_sets("MNIST_data/")
69 |
70 | # Process MNIST
71 | X_test = mnist.test.images  # X_test shape: [num_test, 28*28]
72 | X_test = X_test.reshape([-1, step_size, input_size])
73 | y_test = mnist.test.labels
74 |
75 | # Initialize the variables.
76 | init = tf.global_variables_initializer()
77 |
78 | # Empty lists for tracking.
79 | loss_train_list = []
80 | acc_train_list = []
81 |
82 | # Train the model.
83 | with tf.Session() as sess:
84 |     sess.run(init)
85 |     n_batches = mnist.train.num_examples // args.batch_size
86 |     for epoch in range(args.num_epoch):
87 |         for batch in range(n_batches):
88 |             X_train, y_train = mnist.train.next_batch(args.batch_size)
89 |             X_train = X_train.reshape([-1, step_size, input_size])
90 |             sess.run(optimizer, feed_dict={X: X_train, y: y_train})
91 |         loss_train, acc_train = sess.run(
92 |             [loss, accuracy], feed_dict={X: X_train, y: y_train})
93 |         loss_train_list.append(loss_train)
94 |         acc_train_list.append(acc_train)
95 |         print('Epoch: {}, Train Loss: {:.3f}, Train Acc: {:.3f}'.format(
96 |             epoch + 1, loss_train, acc_train))
97 |     loss_test, acc_test = sess.run(
98 |         [loss, accuracy], feed_dict={X: X_test, y: y_test})
99 |     print('Test Loss: {:.3f}, Test Acc: {:.3f}'.format(loss_test, acc_test))
100 |
-------------------------------------------------------------------------------- /docs/Makefile: --------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS    =
6 | SPHINXBUILD   = sphinx-build
7 | SPHINXPROJ    = TensorFlow-World
8 | SOURCEDIR     = .
9 | BUILDDIR      = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/_img/0-welcome/graph-run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/0-welcome/graph-run.png -------------------------------------------------------------------------------- /docs/_img/1-basics/basic_math_operations/graph-run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/1-basics/basic_math_operations/graph-run.png -------------------------------------------------------------------------------- /docs/_img/1-basics/readme.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | Basics 3 | ============================== 4 | 5 | 6 | -------------------------------------------------------------------------------- /docs/_img/2-basics_in_machine_learning/linear_regression/updating_model.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/2-basics_in_machine_learning/linear_regression/updating_model.gif -------------------------------------------------------------------------------- /docs/_img/3-neural_network/autoencoder/README.rst: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/_img/3-neural_network/autoencoder/ae.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/autoencoder/ae.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/accuracy_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/accuracy_train.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/activation_fc4_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/activation_fc4_train.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/architecture.png -------------------------------------------------------------------------------- 
/docs/_img/3-neural_network/convolutiona_neural_network/classifier_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/classifier_image.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/convlayer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/convlayer.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/graph.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/histogram_fc4_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/histogram_fc4_train.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/loss_accuracy_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/loss_accuracy_train.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/loss_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/loss_train.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/terminal_training.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/terminal_training.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/convolutiona_neural_network/test_accuracy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/convolutiona_neural_network/test_accuracy.png -------------------------------------------------------------------------------- /docs/_img/3-neural_network/multi-layer-perceptron/neural-network.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/3-neural_network/multi-layer-perceptron/neural-network.png -------------------------------------------------------------------------------- /docs/_img/mainpage/TensorFlow_World.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/mainpage/TensorFlow_World.gif -------------------------------------------------------------------------------- /docs/_img/mainpage/Tensor_GIF.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/mainpage/Tensor_GIF.gif -------------------------------------------------------------------------------- /docs/_img/mainpage/Tensor_GIF_ff.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/mainpage/Tensor_GIF_ff.gif -------------------------------------------------------------------------------- /docs/_img/mainpage/installation.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/docs/_img/mainpage/installation.gif -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # TensorFlow-World documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Jun 28 22:26:19 2017. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | # import os 20 | # import sys 21 | # sys.path.insert(0, os.path.abspath('.')) 22 | 23 | 24 | # -- General configuration ------------------------------------------------ 25 | 26 | # If your documentation needs a minimal Sphinx version, state it here. 27 | # 28 | # needs_sphinx = '1.0' 29 | 30 | # Add any Sphinx extension module names here, as strings. They can be 31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 32 | # ones. 33 | extensions = ['sphinx.ext.autodoc', 34 | 'sphinx.ext.mathjax', 35 | 'sphinx.ext.viewcode', 36 | 'sphinx.ext.githubpages'] 37 | 38 | # Add any paths that contain templates here, relative to this directory. 39 | templates_path = ['_templates'] 40 | 41 | # The suffix(es) of source filenames. 42 | # You can specify multiple suffix as a list of string: 43 | # 44 | # source_suffix = ['.rst', '.md'] 45 | source_suffix = '.rst' 46 | 47 | # The master toctree document. 48 | master_doc = 'index' 49 | 50 | # General information about the project. 
51 | project = u'TensorFlow-World' 52 | copyright = u'2017, Amirsina Torfi' 53 | author = u'Amirsina Torfi' 54 | 55 | # The version info for the project you're documenting, acts as replacement for 56 | # |version| and |release|, also used in various other places throughout the 57 | # built documents. 58 | # 59 | # The short X.Y version. 60 | version = u'1.0' 61 | # The full version, including alpha/beta/rc tags. 62 | release = u'1.0' 63 | 64 | # The language for content autogenerated by Sphinx. Refer to documentation 65 | # for a list of supported languages. 66 | # 67 | # This is also used if you do content translation via gettext catalogs. 68 | # Usually you set "language" from the command line for these cases. 69 | language = None 70 | 71 | # List of patterns, relative to source directory, that match files and 72 | # directories to ignore when looking for source files. 73 | # These patterns also affect html_static_path and html_extra_path 74 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 75 | 76 | # The name of the Pygments (syntax highlighting) style to use. 77 | pygments_style = 'sphinx' 78 | 79 | # If true, `todo` and `todoList` produce output, else they produce nothing. 80 | todo_include_todos = False 81 | 82 | 83 | # -- Options for HTML output ---------------------------------------------- 84 | 85 | # The theme to use for HTML and HTML Help pages. See the documentation for 86 | # a list of builtin themes. 87 | # 88 | html_theme = 'alabaster' 89 | 90 | # Theme options are theme-specific and customize the look and feel of a theme 91 | # further. For a list of options available for each theme, see the 92 | # documentation. 93 | # 94 | # html_theme_options = {} 95 | 96 | html_theme_options = { 97 | 'show_powered_by': False, 98 | 'github_user': 'astorfi', 99 | 'github_repo': 'TensorFlow-World', 100 | 'github_banner': True, 101 | 'show_related': False 102 | } 103 | 104 | # Add any paths that contain custom static files (such as style sheets) here, 105 | # relative to this directory. They are copied after the builtin static files, 106 | # so a file named "default.css" will overwrite the builtin "default.css". 107 | html_static_path = ['_static'] 108 | 109 | # Title 110 | html_title = 'TensorFlow World' 111 | 112 | 113 | # -- Options for HTMLHelp output ------------------------------------------ 114 | 115 | # Output file base name for HTML help builder. 116 | htmlhelp_basename = 'TensorFlow-Worlddoc' 117 | 118 | 119 | # If true, links to the reST sources are added to the pages. 120 | html_show_sourcelink = False 121 | 122 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 123 | html_show_sphinx = False 124 | 125 | 126 | # -- Options for LaTeX output --------------------------------------------- 127 | 128 | latex_elements = { 129 | # The paper size ('letterpaper' or 'a4paper'). 130 | # 131 | # 'papersize': 'letterpaper', 132 | 133 | # The font size ('10pt', '11pt' or '12pt'). 134 | # 135 | # 'pointsize': '10pt', 136 | 137 | # Additional stuff for the LaTeX preamble. 138 | # 139 | # 'preamble': '', 140 | 141 | # Latex figure (float) alignment 142 | # 143 | # 'figure_align': 'htbp', 144 | } 145 | 146 | # Grouping the document tree into LaTeX files. List of tuples 147 | # (source start file, target name, title, 148 | # author, documentclass [howto, manual, or own class]).
149 | latex_documents = [ 150 | (master_doc, 'TensorFlow-World.tex', u'TensorFlow-World Documentation', 151 | u'Amirsina Torfi', 'manual'), 152 | ] 153 | 154 | 155 | # -- Options for manual page output --------------------------------------- 156 | 157 | # One entry per manual page. List of tuples 158 | # (source start file, name, description, authors, manual section). 159 | man_pages = [ 160 | (master_doc, 'tensorflow-world', u'TensorFlow-World Documentation', 161 | [author], 1) 162 | ] 163 | 164 | 165 | # -- Options for Texinfo output ------------------------------------------- 166 | 167 | # Grouping the document tree into Texinfo files. List of tuples 168 | # (source start file, target name, title, author, 169 | # dir menu entry, description, category) 170 | texinfo_documents = [ 171 | (master_doc, 'TensorFlow-World', u'TensorFlow-World Documentation', 172 | author, 'TensorFlow-World', 'One line description of project.', 173 | 'Miscellaneous'), 174 | ] 175 | 176 | 177 | 178 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../README.rst 2 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=TensorFlow-World 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/tutorials/0-welcome/README.rst: -------------------------------------------------------------------------------- 1 | ============================ 2 | Welcome to TensorFlow World 3 | ============================ 4 | 5 | .. _this link: https://github.com/astorfi/TensorFlow-World/tree/master/Tutorials/0-welcome 6 | 7 | The tutorials in this section are just a starting point for going into the TensorFlow world. 8 | 9 | We use TensorBoard for visualizing the outcomes. TensorBoard is the graph visualization tool provided by TensorFlow. Using Google’s words: “The computations you'll use TensorFlow for - like training a massive deep neural network - can be complex and confusing. To make it easier to understand, debug, and optimize TensorFlow programs, we've included a suite of visualization tools called TensorBoard.” A simple TensorBoard implementation is used in this tutorial. 10 | 11 | **NOTE:** 12 | 13 | * The details of summary operations, TensorBoard, and their advantages are beyond the scope of this tutorial and will be presented in more advanced tutorials.
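For reference, once the event files have been written (as shown in the following sections), TensorBoard can be launched from the terminal and pointed to the log directory:

.. code:: shell

    tensorboard --logdir="absolute/path/to/log_dir"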
14 | 15 | 16 | -------------------------- 17 | Preparing the environment 18 | -------------------------- 19 | 20 | At first, we have to import the necessary libraries. 21 | 22 | .. code:: python 23 | 24 | from __future__ import print_function 25 | import tensorflow as tf 26 | import os 27 | 28 | Since we aim to use TensorBoard, we need a directory to store the information (the operations and their corresponding outputs if desired by the user). This information is exported to ``event files`` by TensorFlow. The event files can be transformed into visual data so that the user is able to evaluate the architecture and the operations. The ``path`` to store these event files is defined as below: 29 | 30 | .. code:: python 31 | 32 | # The default path for saving event files is the same folder of this python file. 33 | tf.app.flags.DEFINE_string( 34 | 'log_dir', os.path.dirname(os.path.abspath(__file__)) + '/logs', 35 | 'Directory where event logs are written to.') 36 | 37 | # Store all elements in FLAG structure! 38 | FLAGS = tf.app.flags.FLAGS 39 | 40 | The ``os.path.dirname(os.path.abspath(__file__))`` gets the directory name of the current python file. ``tf.app.flags.FLAGS`` points to all defined flags using the ``FLAGS`` indicator. From now on the flags can be called using ``FLAGS.flag_name``. 41 | 42 | For convenience, it is useful to only work with ``absolute paths``. The following script prompts the user to use an absolute path for the ``log_dir`` directory. 43 | 44 | .. code:: python 45 | 46 | # The user is prompted to input an absolute path. 47 | # os.path.expanduser is leveraged to transform the '~' sign to the corresponding path indicator. 48 | # Example: '~/logs' equals to '/home/username/logs' 49 | if not os.path.isabs(os.path.expanduser(FLAGS.log_dir)): 50 | raise ValueError('You must assign absolute path for --log_dir') 51 | 52 | ----------------- 53 | Inauguration 54 | ----------------- 55 | 56 | A simple sentence can be defined as a TensorFlow constant: 57 | 58 | .. code:: python 59 | 60 | # Defining some sentence! 61 | welcome = tf.constant('Welcome to TensorFlow world!') 62 | 63 | The ``tf.`` operator performs the specific operation and the output will be a ``Tensor``. The attribute ``name="some_name"`` can be defined for better TensorBoard visualization, as we will see later in this tutorial. 64 | 65 | ------------------- 66 | Run the Experiment 67 | ------------------- 68 | 69 | The ``session``, which is the environment for running the operations, is executed as below: 70 | 71 | .. code:: python 72 | 73 | # Run the session 74 | with tf.Session() as sess: 75 | writer = tf.summary.FileWriter(os.path.expanduser(FLAGS.log_dir), sess.graph) 76 | print("output: ", sess.run(welcome)) 77 | 78 | # Closing the writer. 79 | writer.close() 80 | sess.close() 81 | 82 | The ``tf.summary.FileWriter`` is defined to write the summaries into ``event files``. The ``sess.run()`` command must be used to evaluate any ``Tensor``; otherwise the operation won't be executed. In the end, the summary writer is closed using ``writer.close()``. 83 | 84 | 85 | -------------------------------------------------------------------------------- /docs/tutorials/1-basics/basic_math_operations/README.rst: -------------------------------------------------------------------------------- 1 | ============================ 2 | Basic Math Operations 3 | ============================ 4 | 5 | .. 
_this link: https://github.com/astorfi/TensorFlow-World/tree/master/codes/0-welcome 6 | 7 | The tutorials in this section are just a starting point for going into the TensorFlow world. 8 | 9 | We use TensorBoard for visualizing the outcomes. TensorBoard is the graph visualization tool provided by TensorFlow. Using Google’s words: “The computations you'll use TensorFlow for - like training a massive deep neural network - can be complex and confusing. To make it easier to understand, debug, and optimize TensorFlow programs, we've included a suite of visualization tools called TensorBoard.” A simple TensorBoard implementation is used in this tutorial. 10 | 11 | **NOTE:** 12 | 13 | * The details of summary operations, TensorBoard, and their advantages are beyond the scope of this tutorial and will be presented in more advanced tutorials. 14 | 15 | 16 | -------------------------- 17 | Preparing the environment 18 | -------------------------- 19 | 20 | At first, we have to import the necessary libraries. 21 | 22 | .. code:: python 23 | 24 | from __future__ import print_function 25 | import tensorflow as tf 26 | import os 27 | 28 | Since we aim to use TensorBoard, we need a directory to store the information (the operations and their corresponding outputs if desired by the user). This information is exported to ``event files`` by TensorFlow. The event files can be transformed into visual data so that the user is able to evaluate the architecture and the operations. The ``path`` to store these event files is defined as below: 29 | 30 | .. code:: python 31 | 32 | # The default path for saving event files is the same folder of this python file. 33 | tf.app.flags.DEFINE_string( 34 | 'log_dir', os.path.dirname(os.path.abspath(__file__)) + '/logs', 35 | 'Directory where event logs are written to.') 36 | 37 | # Store all elements in FLAG structure! 38 | FLAGS = tf.app.flags.FLAGS 39 | 40 | The ``os.path.dirname(os.path.abspath(__file__))`` gets the directory name of the current python file. ``tf.app.flags.FLAGS`` points to all defined flags using the ``FLAGS`` indicator. From now on the flags can be called using ``FLAGS.flag_name``. 41 | 42 | For convenience, it is useful to only work with ``absolute paths``. The following script prompts the user to use an absolute path for the ``log_dir`` directory. 43 | 44 | .. code:: python 45 | 46 | # The user is prompted to input an absolute path. 47 | # os.path.expanduser is leveraged to transform the '~' sign to the corresponding path indicator. 48 | # Example: '~/logs' equals to '/home/username/logs' 49 | if not os.path.isabs(os.path.expanduser(FLAGS.log_dir)): 50 | raise ValueError('You must assign absolute path for --log_dir') 51 | 52 | -------- 53 | Basics 54 | -------- 55 | 56 | Some basic math operations can be defined with TensorFlow: 57 | 58 | .. code:: python 59 | 60 | # Defining some constant values 61 | a = tf.constant(5.0, name="a") 62 | b = tf.constant(10.0, name="b") 63 | 64 | # Some basic operations 65 | x = tf.add(a, b, name="add") 66 | y = tf.div(a, b, name="divide") 67 | 68 | The ``tf.`` operator performs the specific operation and the output will be a ``Tensor``. The attribute ``name="some_name"`` is defined for better TensorBoard visualization, as we will see later in this tutorial. 69 | 70 | ------------------- 71 | Run the Experiment 72 | ------------------- 73 | 74 | The ``session``, which is the environment for running the operations, is executed as below: 75 | 76 | .. 
code:: python 77 | 78 | # Run the session 79 | with tf.Session() as sess: 80 | writer = tf.summary.FileWriter(os.path.expanduser(FLAGS.log_dir), sess.graph) 81 | print("output: ", sess.run([a,b,x,y])) 82 | 83 | # Closing the writer. 84 | writer.close() 85 | sess.close() 86 | 87 | The ``tf.summary.FileWriter`` is defined to write the summaries into ``event files``. The ``sess.run()`` command must be used to evaluate any ``Tensor``; otherwise the operation won't be executed. In the end, the summary writer is closed using ``writer.close()``. 88 | 89 | -------- 90 | Results 91 | -------- 92 | 93 | The result of running the code in the terminal is as below: 94 | 95 | .. code:: shell 96 | 97 | [5.0, 10.0, 15.0, 0.5] 98 | 99 | 100 | If we run TensorBoard using ``tensorboard --logdir="absolute/path/to/log_dir"``, we get the following when visualizing the ``Graph``: 101 | 102 | .. figure:: https://github.com/astorfi/TensorFlow-World/blob/master/docs/_img/1-basics/basic_math_operations/graph-run.png 103 | :scale: 30 % 104 | :align: center 105 | 106 | **Figure 1:** The TensorFlow Graph. 107 | 108 | -------------------------------------------------------------------------------- /docs/tutorials/1-basics/readme.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | Basics 3 | ============================== 4 | 5 | 6 | -------------------------------------------------------------------------------- /docs/tutorials/1-basics/variables/README.rst: -------------------------------------------------------------------------------- 1 | Introduction to TensorFlow Variables: Creation, Initialization 2 | -------------------------------------------------------------- 3 | 4 | This tutorial deals with defining and initializing TensorFlow variables. 5 | 6 | Introduction 7 | ------------ 8 | 9 | Defining ``variables`` is necessary because they hold the parameters. 10 | Without parameters, training, updating, saving, restoring and any 11 | other operations cannot be performed. The defined variables in 12 | TensorFlow are just tensors with certain shapes and types. The tensors 13 | must be initialized with values to become valid. In this tutorial, we 14 | are going to explain how to ``define`` and ``initialize`` variables. The 15 | source code 16 | is available on the dedicated 17 | GitHub repository. 18 | 19 | Creating variables 20 | ------------------ 21 | 22 | To generate a variable, the tf.Variable() class will be used. When 23 | we define a variable, we basically pass a ``tensor`` and its ``value`` 24 | to the graph. In essence, the following will happen: 25 | 26 | - A ``variable`` tensor that holds a value will be passed to the 27 | graph. 28 | - By using tf.assign, an initializer sets the initial variable value. 29 | 30 | Some arbitrary variables can be defined as follows: 31 | 32 | .. code:: python 33 | 34 | 35 | import tensorflow as tf 36 | from tensorflow.python.framework import ops 37 | 38 | ####################################### 39 | ######## Defining Variables ########### 40 | ####################################### 41 | 42 | # Create three variables with some default values. 43 | weights = tf.Variable(tf.random_normal([2, 3], stddev=0.1), 44 | name="weights") 45 | biases = tf.Variable(tf.zeros([3]), name="biases") 46 | custom_variable = tf.Variable(tf.zeros([3]), name="custom") 47 | 48 | # Get all the variables' tensors and store them in a list.
49 | all_variables_list = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) 50 | 51 | 52 | In the above script, ``ops.get_collection`` gets the list of all defined variables 53 | from the graph. The ``name`` key defines a specific name for each 54 | variable on the graph. 55 | 56 | Initialization 57 | -------------- 58 | 59 | ``Initializers`` of the variables must be run before all other 60 | operations in the model. As an analogy, we can consider the starter of 61 | a car. Instead of running an initializer, variables can also be 62 | ``restored`` from saved models such as a checkpoint file. Variables 63 | can be initialized globally, specifically, or from other variables. We 64 | investigate the different choices in the subsequent sections. 65 | 66 | Initializing Specific Variables 67 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | 69 | By using tf.variables\_initializer, we can explicitly command 70 | TensorFlow to initialize only certain variables. The script is as follows: 71 | 72 | .. code:: python 73 | 74 | # "variable_list_custom" is the list of variables that we want to initialize. 75 | variable_list_custom = [weights, custom_variable] 76 | 77 | # The initializer 78 | init_custom_op = tf.variables_initializer(var_list=variable_list_custom) 79 | 80 | Note that custom initialization does not mean that we don't need to 81 | initialize other variables! All variables that operations will be 82 | performed on over the graph must be initialized or restored from 83 | saved variables. This example only shows how we can initialize 84 | specific variables by hand. 85 | 86 | Global variable initialization 87 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 88 | 89 | All variables can be initialized at once using 90 | tf.global\_variables\_initializer(). This op must be run after the model has been constructed. 91 | The script is as below: 92 | 93 | .. code:: python 94 | 95 | # Method-1 96 | # Add an op to initialize the variables. 97 | init_all_op = tf.global_variables_initializer() 98 | 99 | # Method-2 100 | init_all_op = tf.variables_initializer(var_list=all_variables_list) 101 | 102 | Both of the above methods are identical. We only provide the second one to 103 | demonstrate that ``tf.global_variables_initializer()`` is nothing 104 | but ``tf.variables_initializer`` when you pass all the variables as the input argument. 105 | 106 | Initialization of a variable using other existing variables 107 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 108 | 109 | New variables can be initialized using other existing variables' initial 110 | values by taking the values using initialized\_value(). 111 | 112 | Initialization using predefined variables' values: 113 | 114 | .. code:: python 115 | 116 | # Create another variable with the same value as 'weights'. 117 | WeightsNew = tf.Variable(weights.initialized_value(), name="WeightsNew") 118 | 119 | # Now, the variable must be initialized. 120 | init_WeightsNew_op = tf.variables_initializer(var_list=[WeightsNew]) 121 | 122 | As can be seen from the above script, the ``WeightsNew`` variable is 123 | initialized with the value of the predefined ``weights`` variable. 124 | 125 | Running the session 126 | ------------------- 127 | 128 | All we did so far was to define the initializers' ops and put them on the 129 | graph. In order to truly initialize variables, the defined initializers' 130 | ops must be run in the session. The script is as follows: 131 | 132 | Running the session for initialization: 133 | 134 | .. 
code:: python 135 | 136 | with tf.Session() as sess: 137 | # Run the initializer operation. 138 | sess.run(init_all_op) 139 | sess.run(init_custom_op) 140 | sess.run(init_WeightsNew_op) 141 | 142 | Each of the initializers has been run separately within the session. 143 | 144 | Summary 145 | ------- 146 | 147 | In this tutorial, we walked through variable creation and 148 | initialization. Global, custom and inherited variable initialization 149 | have been investigated. In future posts, we will investigate how to save 150 | and restore variables. Restoring a variable eliminates the need for 151 | its initialization. 152 | 153 | -------------------------------------------------------------------------------- /docs/tutorials/2-basics_in_machine_learning/linear_regression/README.rst: -------------------------------------------------------------------------------- 1 | 2 | Sections 3 | ~~~~~~~~ 4 | 5 | - `Introduction <#Introduction>`__ 6 | - `Description of the Overall 7 | Process <#Description%20of%20the%20Overall%20Process>`__ 8 | - `How to Do It in Code? <#How%20to%20Do%20It%20in%20Code?>`__ 9 | - `Summary <#Summary>`__ 10 | 11 | Linear Regression using TensorFlow 12 | ---------------------------------- 13 | 14 | This tutorial is about training a linear model with TensorFlow to fit the 15 | data. Alternatively, you can check this `blog post`_. 16 | 17 | .. _blog post: http://www.machinelearninguru.com/deep_learning/tensorflow/machine_learning_basics/linear_regresstion/linear_regression.html 18 | 19 | 20 | 21 | Introduction 22 | ------------ 23 | 24 | In machine learning and statistics, Linear Regression is the modeling of 25 | the relationship between a dependent variable such as Y and at least one 26 | independent variable such as X. In linear regression, the linear 27 | relationship is modeled by a predictor function whose 28 | parameters are estimated from the data; this is called a Linear Model. 29 | The main advantage of the Linear Regression algorithm is its simplicity: it 30 | is very straightforward to interpret the resulting model and map the 31 | data into a new space. In this article, we will introduce how to train a 32 | linear model using TensorFlow and how to showcase the generated model. 33 | 34 | Description of the Overall Process 35 | ---------------------------------- 36 | 37 | In order to train the model, TensorFlow loops through the data and 38 | finds the optimal line (as we have a linear model) that fits 39 | the data. The linear relationship between the two variables X and Y is 40 | estimated by designing an appropriate optimization problem for which the requirement 41 | is a proper loss function. The dataset is available from the 42 | Stanford course CS 43 | 20SI: TensorFlow 44 | for Deep Learning Research. 45 | 46 | How to Do It in Code? 47 | --------------------- 48 | 49 | The process starts by loading the necessary libraries and the 50 | dataset: 51 | 52 | .. code:: python 53 | 54 | 55 | # Data file provided by the Stanford course CS 20SI: TensorFlow for Deep Learning Research. 56 | # https://github.com/chiphuyen/tf-stanford-tutorials 57 | DATA_FILE = "data/fire_theft.xls" 58 | 59 | # read the data from the .xls file.
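# (Imports assumed by this and the following snippets; all of these
# packages are listed in the repository's requirements.txt.)
import tensorflow as tf
import numpy as np
import xlrd
import matplotlib.pyplot as plt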
60 | book = xlrd.open_workbook(DATA_FILE, encoding_override="utf-8") 61 | sheet = book.sheet_by_index(0) 62 | data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)]) 63 | num_samples = sheet.nrows - 1 64 | 65 | ####################### 66 | ## Defining flags ##### 67 | ####################### 68 | tf.app.flags.DEFINE_integer( 69 | 'num_epochs', 50, 'The number of epochs for training the model. Default=50') 70 | # Store all elements in FLAG structure! 71 | FLAGS = tf.app.flags.FLAGS 72 | 73 | Then we continue by defining and initializing the necessary variables: 74 | 75 | .. code:: python 76 | 77 | # creating the weight and bias. 78 | # The defined variables will be initialized to zero. 79 | W = tf.Variable(0.0, name="weights") 80 | b = tf.Variable(0.0, name="bias") 81 | 82 | After that, we should define the necessary functions. The following 83 | blocks demonstrate the defined functions: 84 | 85 | .. code:: python 86 | 87 | def inputs(): 88 | """ 89 | Defining the place_holders. 90 | :return: 91 | Returning the data and label place holders. 92 | """ 93 | X = tf.placeholder(tf.float32, name="X") 94 | Y = tf.placeholder(tf.float32, name="Y") 95 | return X,Y 96 | 97 | .. code:: python 98 | 99 | def inference(X): 100 | """ 101 | Forward passing the X. 102 | :param X: Input. 103 | :return: X*W + b. 104 | """ 105 | return X * W + b 106 | 107 | .. code:: python 108 | 109 | def loss(X, Y): 110 | """ 111 | compute the loss by comparing the predicted value to the actual label. 112 | :param X: The input. 113 | :param Y: The label. 114 | :return: The loss over the samples. 115 | """ 116 | 117 | # Making the prediction. 118 | Y_predicted = inference(X) 119 | return tf.squared_difference(Y, Y_predicted) 120 | 121 | .. code:: python 122 | 123 | # The training function. 124 | def train(loss): 125 | learning_rate = 0.0001 126 | return tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) 127 | 128 | Next, we are going to loop through different epochs of data and perform 129 | the optimization process: 130 | 131 | .. code:: python 132 | 133 | with tf.Session() as sess: 134 | 135 | # Initialize the variables[w and b]. 136 | sess.run(tf.global_variables_initializer()) 137 | 138 | # Get the input tensors 139 | X, Y = inputs() 140 | 141 | # Return the train loss and create the train_op. 142 | train_loss = loss(X, Y) 143 | train_op = train(train_loss) 144 | 145 | # Train the model 146 | for epoch_num in range(FLAGS.num_epochs): # loop over the epochs 147 | for x, y in data: 148 | 149 | 150 | # Session runs train_op to minimize loss 151 | loss_value,_ = sess.run([train_loss,train_op], feed_dict={X: x, Y: y}) 152 | 153 | # Displaying the loss per epoch. 154 | print('epoch %d, loss=%f' %(epoch_num+1, loss_value)) 155 | 156 | # save the values of weight and bias 157 | wcoeff, bias = sess.run([W, b]) 158 | 159 | In the above code, sess.run(tf.global\_variables\_initializer()) 160 | initializes all the defined variables globally. The train\_op is built 161 | upon the train\_loss and is run in each step to update the parameters. In the end, the 162 | parameters of the linear model, i.e., wcoeff and bias, will be returned. 163 | For evaluation, the prediction line and the original data will be 164 | demonstrated to show how the model fits the data: 165 | 166 | .. 
code:: python 167 | 168 | ############################### 169 | #### Evaluate and plot ######## 170 | ############################### 171 | Input_values = data[:,0] 172 | Labels = data[:,1] 173 | Prediction_values = data[:,0] * wcoeff + bias 174 | plt.plot(Input_values, Labels, 'ro', label='main') 175 | plt.plot(Input_values, Prediction_values, label='Predicted') 176 | 177 | # Saving the result. 178 | plt.legend() 179 | plt.savefig('plot.png') 180 | plt.close() 181 | 182 | The result is depicted in the following figure: 183 | 184 | .. figure:: https://github.com/astorfi/TensorFlow-World/blob/master/docs/_img/2-basics_in_machine_learning/linear_regression/updating_model.gif 185 | :scale: 50 % 186 | :align: center 187 | 188 | **Figure 1:** The original data alongside the estimated linear 189 | model. 190 | 191 | The above animated GIF shows the model with some tiny movements that 192 | demonstrate the updating process. As can be observed, the linear 193 | model is certainly not among the best! However, as we mentioned, its 194 | simplicity is its advantage! 195 | 196 | Summary 197 | ------- 198 | 199 | In this tutorial, we walked through the linear model creation using 200 | TensorFlow. The line which was found after training is not guaranteed 201 | to be the best one. Different parameters affect the convergence 202 | accuracy. The linear model is found using stochastic optimization, and 203 | its simplicity makes our work easier. 204 | -------------------------------------------------------------------------------- /docs/tutorials/2-basics_in_machine_learning/logistic_regression/README.rst: -------------------------------------------------------------------------------- 1 | 2 | 3 | Sections 4 | ~~~~~~~~ 5 | 6 | - `Introduction <#Introduction>`__ 7 | - `Description of the Overall 8 | Process <#Description%20of%20the%20Overall%20Process>`__ 9 | - `How to Do It in Code? <#How%20to%20Do%20It%20in%20Code?>`__ 10 | - `Summary <#Summary>`__ 11 | 12 | Logistic Regression using TensorFlow 13 | ------------------------------------ 14 | 15 | This tutorial is about training a logistic regression model with TensorFlow for 16 | binary classification. 17 | 18 | Introduction 19 | ------------ 20 | 21 | In the *Linear Regression using 22 | TensorFlow* 23 | post we described how to predict continuous-valued parameters by 24 | linearly modeling the system. What if the objective is to decide between 25 | two choices? The answer is simple: we are dealing with a classification 26 | problem. In this tutorial, the objective is to decide whether the input 27 | image is digit "0" or digit "1" using Logistic Regression. In other 28 | words, whether it is digit "1" or not! The full source code is available 29 | in the associated GitHub 30 | repository. 31 | 32 | Dataset 33 | ------- 34 | 35 | The dataset that we work with in this tutorial is the 36 | MNIST dataset. The main dataset 37 | consists of 55000 training and 10000 test images. The images are 28x28x1, 38 | each of which represents a hand-written digit from 0 to 9. We create 39 | feature vectors of size 784 from each image. We only use the 0 and 1 images 40 | for our setting. 41 | 42 | Logistic Regression 43 | ------------------- 44 | 45 | In linear regression the effort is to predict a continuous outcome 46 | value using the linear function $y=W^{T}x$.
On the other hand, in 47 | logistic regression we aim to predict a binary label 48 | $y\\in\\{0,1\\}$, for which we use a different prediction process as 49 | opposed to linear regression. In logistic regression, the predicted 50 | output is the probability that the input sample belongs to a targeted 51 | class, which is digit "1" in our case. In a binary-classification 52 | problem, obviously if the $P(x\\in\\{target\\\_class\\})$ = M, then 53 | $P(x\\in\\{non\\\_target\\\_class\\}) = 1 - M$. So the hypothesis can be 54 | created as follows: 55 | 56 | $$P(y=1\|x)=h\_{W}(x)={{1}\\over{1+exp(-W^{T}x)}}=Sigmoid(W^{T}x) \\ \\ 57 | \\ (1)$$ $$P(y=0\|x)=1 - P(y=1\|x) = 1 - h\_{W}(x) \\ \\ \\ (2)$$ 58 | 59 | In the above equations, the Sigmoid function maps the predicted output into 60 | probability space, in which the values are in the range of $[0,1]$. The main 61 | objective is to find a model for which the output probability is high 62 | when the input sample is "1" and small otherwise. The 63 | important point is to design an appropriate cost function that 64 | minimizes the loss when the output is as desired and vice versa. The cost 65 | function for a set of data such as $(x^{i},y^{i})$ can be defined as 66 | below: 67 | 68 | $$Loss(W) = 69 | \\sum\_{i}{y^{(i)}log{1\\over{h\_{W}(x^{i})}}+(1-y^{(i)})log{1\\over{1-h\_{W}(x^{i})}}}$$ 70 | 71 | As can be seen from the above equation, the loss function consists of 72 | two terms, and for each sample only one of them is non-zero, considering the 73 | binary labels. 74 | 75 | Up to now, we have defined the formulation and optimization function of 76 | logistic regression. In the next part, we show how to do it in code using 77 | mini-batch optimization. 78 | 79 | Description of the Overall Process 80 | ---------------------------------- 81 | 82 | At first, we process the dataset and extract only the "0" and "1" digits. The 83 | code implemented for logistic regression is heavily inspired by our 84 | *Train a Convolutional Neural Network as a 85 | Classifier* 86 | post. We refer to the aforementioned post for a better 87 | understanding of the implementation details. In this tutorial, we only 88 | explain how we process the dataset and how to implement logistic regression; 89 | the rest is clear from the CNN classifier post that we referred to 90 | earlier. 91 | 92 | How to Do It in Code? 93 | --------------------- 94 | 95 | In this part, we explain how to extract the desired samples from the dataset and 96 | how to implement logistic regression using Softmax. 97 | 98 | Process Dataset 99 | ~~~~~~~~~~~~~~~ 100 | 101 | At first, we need to extract the "0" and "1" digits from the MNIST dataset: 102 | 103 | .. code:: python 104 | 105 | from tensorflow.examples.tutorials.mnist import input_data 106 | mnist = input_data.read_data_sets("MNIST_data/", reshape=True, one_hot=False) 107 | 108 | ######################## 109 | ### Data Processing #### 110 | ######################## 111 | # Organize the data and feed it to associated dictionaries. 112 | data={} 113 | 114 | data['train/image'] = mnist.train.images 115 | data['train/label'] = mnist.train.labels 116 | data['test/image'] = mnist.test.images 117 | data['test/label'] = mnist.test.labels 118 | 119 | # Get only the samples with zero and one label for training.
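# (Equivalently, assuming numpy is imported as np, the same index list could be built in one line: np.where(np.isin(data['train/label'], [0, 1]))[0].)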
120 | index_list_train = [] 121 | for sample_index in range(data['train/label'].shape[0]): 122 | label = data['train/label'][sample_index] 123 | if label == 1 or label == 0: 124 | index_list_train.append(sample_index) 125 | 126 | # Reform the train data structure. 127 | data['train/image'] = mnist.train.images[index_list_train] 128 | data['train/label'] = mnist.train.labels[index_list_train] 129 | 130 | 131 | # Get only the samples with zero and one label for the test set. 132 | index_list_test = [] 133 | for sample_index in range(data['test/label'].shape[0]): 134 | label = data['test/label'][sample_index] 135 | if label == 1 or label == 0: 136 | index_list_test.append(sample_index) 137 | 138 | # Reform the test data structure. 139 | data['test/image'] = mnist.test.images[index_list_test] 140 | data['test/label'] = mnist.test.labels[index_list_test] 141 | 142 | The code looks verbose but is actually very simple. All we want 143 | is implemented in lines 28-32, in which the desired data samples are 144 | extracted. Next, we have to dig into the logistic regression architecture. 145 | 146 | Logistic Regression Implementation 147 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 148 | 149 | The logistic regression structure simply feed-forwards the input 150 | features through a fully-connected layer in which the last layer only 151 | has two classes. The fully-connected architecture can be defined as 152 | below: 153 | 154 | .. code:: python 155 | 156 | ############################################### 157 | ########### Defining place holders ############ 158 | ############################################### 159 | image_place = tf.placeholder(tf.float32, shape=([None, num_features]), name='image') 160 | label_place = tf.placeholder(tf.int32, shape=([None,]), name='gt') 161 | label_one_hot = tf.one_hot(label_place, depth=FLAGS.num_classes, axis=-1) 162 | dropout_param = tf.placeholder(tf.float32) 163 | 164 | ################################################## 165 | ########### Model + Loss + Accuracy ############## 166 | ################################################## 167 | # A simple fully connected layer with two classes and a Softmax is equivalent to Logistic Regression. 168 | logits = tf.contrib.layers.fully_connected(inputs=image_place, num_outputs = FLAGS.num_classes, scope='fc') 169 | 170 | The first few lines define place-holders in order to put the 171 | desired values on the graph. Please refer to the CNN classifier post 172 | mentioned earlier 173 | for further details. The desired loss function can easily be implemented 174 | in TensorFlow with the following script: 175 | 176 | .. code:: python 177 | 178 | # Define loss 179 | with tf.name_scope('loss'): 180 | loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=label_one_hot)) 181 | 182 | # Accuracy 183 | with tf.name_scope('accuracy'): 184 | # Evaluate the model 185 | correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(label_one_hot, 1)) 186 | 187 | # Accuracy calculation 188 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 189 | 190 | The tf.nn.softmax\_cross\_entropy\_with\_logits function does the work. 191 | It optimizes the previously defined cost function with a subtle 192 | difference: it operates on two class logits, so even if the sample is digit 193 | "0", the corresponding class probability will be high. In other words, 194 | tf.nn.softmax\_cross\_entropy\_with\_logits predicts a probability for each class 195 | and inherently, on its own, makes the decision.
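As a sanity check, the equivalence between the two-class Softmax loss and the sigmoid formulation of equations (1) and (2) can be verified numerically. The following is a minimal NumPy sketch (the logits and labels are arbitrary illustrative values, not taken from the tutorial code):

.. code:: python

    import numpy as np

    # Arbitrary logits for a batch of 3 samples and 2 classes (digit "0" vs. digit "1").
    logits = np.array([[2.0, -1.0], [0.5, 0.3], [-1.5, 2.5]])
    labels = np.array([0, 1, 1])  # ground-truth classes

    # Softmax over the two logits of each sample.
    exps = np.exp(logits - logits.max(axis=1, keepdims=True))
    probs = exps / exps.sum(axis=1, keepdims=True)

    # Cross-entropy: -log of the probability assigned to the true class.
    softmax_ce = -np.log(probs[np.arange(len(labels)), labels])

    # Sigmoid form: P(y=1|x) = Sigmoid(z_1 - z_0), as in equation (1).
    p1 = 1.0 / (1.0 + np.exp(-(logits[:, 1] - logits[:, 0])))
    sigmoid_ce = -(labels * np.log(p1) + (1 - labels) * np.log(1 - p1))

    print(np.allclose(softmax_ce, sigmoid_ce))  # prints True

The softmax of two logits depends only on their difference, which plays the role of $W^{T}x$ in equation (1); this is why a two-output fully-connected layer with a Softmax loss is equivalent to logistic regression.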
196 | 197 | Summary 198 | ------- 199 | 200 | In this tutorial, we described logistic regression and showed how to 201 | implement it in code. Instead of making a decision based on the output 202 | probability of a single targeted class, we extended the problem to a two-class 203 | problem in which we predict the probability for each class. In future posts, we will extend this problem to a multi-class problem and we 204 | will show that it can be done with a similar approach. 205 | -------------------------------------------------------------------------------- /docs/tutorials/3-neural_network/autoencoder/README.rst: -------------------------------------------------------------------------------- 1 | Autoencoders and their implementations in TensorFlow 2 | ---------------------------------------------------- 3 | 4 | In this post, you will learn the concept behind Autoencoders as well as how 5 | to implement an autoencoder in TensorFlow. 6 | 7 | Introduction 8 | ------------ 9 | 10 | Autoencoders are a type of neural network which copies its input to its 11 | output. They usually consist of two main parts, namely the Encoder and the 12 | Decoder. The encoder maps the input into a hidden layer space which we 13 | refer to as a code. The decoder then reconstructs the input from the 14 | code. There are different types of Autoencoders: 15 | 16 | - **Undercomplete Autoencoders:** An autoencoder whose code 17 | dimension is less than the input dimension. Learning such an 18 | autoencoder forces it to capture the most salient features. 19 | However, using a big encoder and decoder in the absence of enough 20 | training data allows the network to memorize the task and omit 21 | learning useful features. With a linear decoder, it can 22 | act as PCA. However, adding nonlinear activation functions to the 23 | network makes it a nonlinear generalization of PCA. 24 | - **Regularized Autoencoders:** Rather than limiting the size of the 25 | autoencoder and the code dimension for the sake of feature 26 | learning, we can add a loss term to prevent it from memorizing the 27 | task and the training data. 28 | - **Sparse Autoencoders:** An autoencoder which has a sparsity 29 | penalty in the training loss in addition to the 30 | reconstruction error. They are usually used for the 31 | purpose of other tasks such as classification. The loss is 32 | not as straightforward as other regularizers, and we will 33 | discuss it in another post later. 34 | - **Denoising Autoencoders (DAE):** The input of a DAE is a 35 | corrupted copy of the real input which is supposed to be 36 | reconstructed. Therefore, a DAE has to undo the corruption 37 | (noise) as well as perform the reconstruction. 38 | - **Contractive Autoencoders (CAE):** The main idea behind 39 | this type of autoencoder is to learn a representation of 40 | the data which is robust to small changes in the input. 41 | - **Variational Autoencoders:** They maximize the probability of the 42 | training data instead of copying the input to the output and 43 | therefore do not need regularization to capture useful 44 | information. 45 | 46 | In this post, we are going to create a simple Undercomplete Autoencoder 47 | in TensorFlow to learn a low dimension representation (code) of the 48 | MNIST dataset. 49 | 50 | Create an Undercomplete Autoencoder 51 | ----------------------------------- 52 | 53 | We are going to create an autoencoder with a 3-layer encoder and 3-layer 54 | decoder.
Each layer of the encoder downsamples its input along the spatial 55 | dimensions (width, height): the first two layers use a stride of 2 and the third a stride of 4, as in the code below. 56 | Consequently, the dimension of the code is 2(width) X 2(height) X 57 | 8(depth) = 32 (for an image of 32X32). Similarly, the 58 | decoder upsamples its input by the same factors in reverse order (using transpose 59 | convolutions with strides 4, 2 and 2). 60 | 61 | .. code-block:: python 62 | 63 | import tensorflow.contrib.layers as lays 64 | 65 | def autoencoder(inputs): 66 | # encoder 67 | # 32 x 32 x 1 -> 16 x 16 x 32 68 | # 16 x 16 x 32 -> 8 x 8 x 16 69 | # 8 x 8 x 16 -> 2 x 2 x 8 70 | net = lays.conv2d(inputs, 32, [5, 5], stride=2, padding='SAME') 71 | net = lays.conv2d(net, 16, [5, 5], stride=2, padding='SAME') 72 | net = lays.conv2d(net, 8, [5, 5], stride=4, padding='SAME') 73 | # decoder 74 | # 2 x 2 x 8 -> 8 x 8 x 16 75 | # 8 x 8 x 16 -> 16 x 16 x 32 76 | # 16 x 16 x 32 -> 32 x 32 x 1 77 | net = lays.conv2d_transpose(net, 16, [5, 5], stride=4, padding='SAME') 78 | net = lays.conv2d_transpose(net, 32, [5, 5], stride=2, padding='SAME') 79 | net = lays.conv2d_transpose(net, 1, [5, 5], stride=2, padding='SAME', activation_fn=tf.nn.tanh) 80 | return net 81 | 82 | .. figure:: ../../../_img/3-neural_network/autoencoder/ae.png 83 | :scale: 50 % 84 | :align: center 85 | 86 | **Figure 1:** Autoencoder 87 | 88 | The MNIST dataset contains vectorized images of size 28X28. Therefore we 89 | define a new function to reshape each batch of MNIST images to 28X28 and 90 | then resize them to 32X32. The reason for resizing to 32X32 is to make the size a 91 | power of two, so that we can easily use strides of 2 and 4 for 92 | downsampling and upsampling. 93 | 94 | .. code-block:: python 95 | 96 | import numpy as np 97 | from skimage import transform 98 | 99 | def resize_batch(imgs): 100 | # A function to resize a batch of MNIST images to (32, 32) 101 | # Args: 102 | # imgs: a numpy array of size [batch_size, 28 X 28]. 103 | # Returns: 104 | # a numpy array of size [batch_size, 32, 32]. 105 | imgs = imgs.reshape((-1, 28, 28, 1)) 106 | resized_imgs = np.zeros((imgs.shape[0], 32, 32, 1)) 107 | for i in range(imgs.shape[0]): 108 | resized_imgs[i, ..., 0] = transform.resize(imgs[i, ..., 0], (32, 32)) 109 | return resized_imgs 110 | 111 | Now we create an autoencoder, define a square error loss and an 112 | optimizer. 113 | 114 | 115 | .. code-block:: python 116 | 117 | import tensorflow as tf 118 | lr = 0.001 # learning rate (also set in the training snippet below) 119 | ae_inputs = tf.placeholder(tf.float32, (None, 32, 32, 1)) # input to the network (MNIST images) 120 | ae_outputs = autoencoder(ae_inputs) # create the Autoencoder network 121 | 122 | # calculate the loss and optimize the network 123 | loss = tf.reduce_mean(tf.square(ae_outputs - ae_inputs)) # calculate the mean square error loss 124 | train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss) 125 | 126 | # initialize the network 127 | init = tf.global_variables_initializer() 128 | 129 | Now we can read the batches, train the network and finally test the 130 | network by reconstructing a batch of test images. 131 | 132 | 133 | .. 
code-block:: python 134 | 135 | from tensorflow.examples.tutorials.mnist import input_data 136 | import matplotlib.pyplot as plt 137 | batch_size = 500 # Number of samples in each batch 138 | epoch_num = 5 # Number of epochs to train the network 139 | lr = 0.001 # Learning rate 140 | 141 | # read MNIST dataset 142 | mnist = input_data.read_data_sets("MNIST_data", one_hot=True) 143 | 144 | # calculate the number of batches per epoch 145 | batch_per_ep = mnist.train.num_examples // batch_size 146 | 147 | with tf.Session() as sess: 148 | sess.run(init) 149 | for ep in range(epoch_num): # epochs loop 150 | for batch_n in range(batch_per_ep): # batches loop 151 | batch_img, batch_label = mnist.train.next_batch(batch_size) # read a batch 152 | batch_img = batch_img.reshape((-1, 28, 28, 1)) # reshape each sample to a (28, 28) image 153 | batch_img = resize_batch(batch_img) # resize the images to (32, 32) 154 | _, c = sess.run([train_op, loss], feed_dict={ae_inputs: batch_img}) 155 | print('Epoch: {} - cost= {:.5f}'.format((ep + 1), c)) 156 | 157 | # test the trained network 158 | batch_img, batch_label = mnist.test.next_batch(50) 159 | batch_img = resize_batch(batch_img) 160 | recon_img = sess.run([ae_outputs], feed_dict={ae_inputs: batch_img})[0] 161 | 162 | # plot the reconstructed images and their ground truths (inputs) 163 | plt.figure(1) 164 | plt.title('Reconstructed Images') 165 | for i in range(50): 166 | plt.subplot(5, 10, i+1) 167 | plt.imshow(recon_img[i, ..., 0], cmap='gray') 168 | plt.figure(2) 169 | plt.title('Input Images') 170 | for i in range(50): 171 | plt.subplot(5, 10, i+1) 172 | plt.imshow(batch_img[i, ..., 0], cmap='gray') 173 | plt.show() 174 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | python-coveralls 4 | tensorflow 5 | matplotlib 6 | xlrd 7 | scikit-learn 8 | pandas 9 | scikit-image 10 | -------------------------------------------------------------------------------- /welcome.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/TensorFlow-Course/5da01f512b1f20701d88e6abe8c6414b57f73f61/welcome.py --------------------------------------------------------------------------------