├── .gitignore ├── .nojekyll ├── CONTRIBUTING.md ├── Instructors └── Slides │ ├── CNTK.pptx │ ├── Deep Learning.pptx │ ├── Softmax.pptx │ └── Transfer Learning.pptx ├── LICENSE-CODE.TXT ├── NOTICE.TXT ├── README.md ├── Students ├── 1-start │ ├── dsvm │ │ ├── dsvm-for-dl.pptx │ │ └── lab │ │ │ ├── 0-dsvm-deploy-script │ │ │ ├── 0-provision-dsvm.md │ │ │ ├── deploydsvm.sh │ │ │ └── imgs │ │ │ │ └── azlogin.png │ │ │ ├── 1-update-disk-size-script │ │ │ ├── expand-osdisk.sh │ │ │ └── get-osdisk-name.sh │ │ │ ├── 2-updating-CNTK │ │ │ ├── 1-Upgrading-CNTK.md │ │ │ └── cntk-install.sh │ │ │ └── 3-delete-script │ │ │ ├── deallocate-vm.sh │ │ │ └── delete-rg.sh │ └── intro.pptx ├── 10-adversarial-attacks │ └── tensorflow │ │ ├── Network-Visualization-TensorFlow.ipynb │ │ └── utils │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── captioning_solver.py │ │ ├── classifiers │ │ ├── __init__.py │ │ ├── rnn.py │ │ └── squeezenet.py │ │ ├── coco_utils.py │ │ ├── data_utils.py │ │ ├── datasets │ │ ├── get_coco_captioning.sh │ │ ├── get_data.sh │ │ ├── get_imagenet_val.sh │ │ └── get_squeezenet_tf.sh │ │ ├── fast_layers.py │ │ ├── gradient_check.py │ │ ├── im2col.py │ │ ├── im2col_cython.pyx │ │ ├── image_utils.py │ │ ├── layer_utils.py │ │ ├── layers.py │ │ ├── optim.py │ │ ├── rnn_layers.py │ │ └── setup.py ├── 11-gans │ ├── CIFAR-10_DataLoader.ipynb │ ├── Synthesizing-Celebs-BEGAN.ipynb │ └── WGAN-LSGAN.ipynb ├── 12-biased-embeddings │ ├── how-to-make-a-racist-ai-without-really-trying.ipynb │ └── racist-ai.zip ├── 1b-computational-graphs │ └── comp-graphs.pptx ├── 2-transfer-learning │ ├── CNTK_301_Image_Recognition_with_Deep_Transfer_Learning.ipynb │ ├── CatsvsDogs │ │ ├── CNTK-Transfer-Cats-Dogs.ipynb │ │ └── utils │ │ │ └── download_model.py │ ├── README.md │ ├── TransferLearning.py │ ├── TransferLearning_Extended.py │ └── install_data_and_model.py ├── 3-keras │ ├── Fashion-MNIST-keras.ipynb │ ├── Keras_CNTK_VGG_CIFAR.ipynb │ ├── common │ │ ├── __init__.py │ │ ├── params.py │ │ └── utils.py │ └── fashion_import.py ├── 4-cntk-overview │ └── cntk-overview.pptx ├── 5-cntk-cifar10 │ ├── CNTK_CIFAR_Trainer_API.ipynb │ ├── CNTK_CIFAR_highAPI.ipynb │ └── common │ │ ├── __init__.py │ │ ├── params.py │ │ └── utils.py ├── 6-style-transfer │ ├── Neural_Style.ipynb │ ├── content.jpg │ └── style.jpg ├── 7-object-detection-frnn │ ├── Grocery │ │ └── testImages │ │ │ ├── WIN_20160803_11_28_42_Pro.bboxes.labels.tsv │ │ │ ├── WIN_20160803_11_28_42_Pro.bboxes.tsv │ │ │ ├── WIN_20160803_11_28_42_Pro.jpg │ │ │ ├── WIN_20160803_11_42_36_Pro.bboxes.labels.tsv │ │ │ ├── WIN_20160803_11_42_36_Pro.bboxes.tsv │ │ │ ├── WIN_20160803_11_42_36_Pro.jpg │ │ │ ├── WIN_20160803_11_46_03_Pro.bboxes.labels.tsv │ │ │ ├── WIN_20160803_11_46_03_Pro.bboxes.tsv │ │ │ ├── WIN_20160803_11_46_03_Pro.jpg │ │ │ ├── WIN_20160803_11_48_26_Pro.bboxes.labels.tsv │ │ │ ├── WIN_20160803_11_48_26_Pro.bboxes.tsv │ │ │ ├── WIN_20160803_11_48_26_Pro.jpg │ │ │ ├── WIN_20160803_12_37_07_Pro.bboxes.labels.tsv │ │ │ ├── WIN_20160803_12_37_07_Pro.bboxes.tsv │ │ │ └── WIN_20160803_12_37_07_Pro.jpg │ ├── ObjectDetectionGroceries.ipynb │ ├── README.md │ ├── cntk_helpers.py │ ├── environment.yml │ ├── fastRCNN │ │ ├── __init__.py │ │ ├── imdb.py │ │ ├── nms.py │ │ ├── pascal_voc.py │ │ ├── test.py │ │ ├── timer.py │ │ ├── train_svms.py │ │ ├── utils │ │ │ ├── cython_bbox.pyd │ │ │ ├── cython_bbox.so │ │ │ ├── cython_nms.pyd │ │ │ └── cython_nms.so │ │ └── voc_eval.py │ └── selectivesearch │ │ ├── README.md │ │ ├── __init__.py │ │ └── selectivesearch.py ├── 
8-char-rnn │ └── CharLM-Shakespeare.ipynb └── 9-imdb │ ├── CNTK_IMDB.ipynb │ ├── LSTM_Keras_CNTK_IMDB.ipynb │ └── common │ ├── __init__.py │ ├── params_lstm.py │ └── utils.py ├── docs ├── CNTK-Transfer-Cats-Dogs_files │ ├── CNTK-Transfer-Cats-Dogs_30_0.svg │ ├── CNTK-Transfer-Cats-Dogs_33_1.png │ ├── CNTK-Transfer-Cats-Dogs_34_1.png │ ├── CNTK-Transfer-Cats-Dogs_37_0.png │ ├── CNTK-Transfer-Cats-Dogs_38_0.png │ ├── CNTK-Transfer-Cats-Dogs_64_0.png │ └── CNTK-Transfer-Cats-Dogs_65_0.png ├── Network-Visualization-TensorFlow_files │ ├── Network-Visualization-TensorFlow_13_0.png │ ├── Network-Visualization-TensorFlow_17_0.png │ ├── Network-Visualization-TensorFlow_22_0.png │ ├── Network-Visualization-TensorFlow_22_1.png │ ├── Network-Visualization-TensorFlow_22_10.png │ ├── Network-Visualization-TensorFlow_22_11.png │ ├── Network-Visualization-TensorFlow_22_12.png │ ├── Network-Visualization-TensorFlow_22_13.png │ ├── Network-Visualization-TensorFlow_22_14.png │ ├── Network-Visualization-TensorFlow_22_15.png │ ├── Network-Visualization-TensorFlow_22_16.png │ ├── Network-Visualization-TensorFlow_22_17.png │ ├── Network-Visualization-TensorFlow_22_18.png │ ├── Network-Visualization-TensorFlow_22_19.png │ ├── Network-Visualization-TensorFlow_22_2.png │ ├── Network-Visualization-TensorFlow_22_20.png │ ├── Network-Visualization-TensorFlow_22_21.png │ ├── Network-Visualization-TensorFlow_22_22.png │ ├── Network-Visualization-TensorFlow_22_23.png │ ├── Network-Visualization-TensorFlow_22_24.png │ ├── Network-Visualization-TensorFlow_22_25.png │ ├── Network-Visualization-TensorFlow_22_26.png │ ├── Network-Visualization-TensorFlow_22_27.png │ ├── Network-Visualization-TensorFlow_22_28.png │ ├── Network-Visualization-TensorFlow_22_29.png │ ├── Network-Visualization-TensorFlow_22_3.png │ ├── Network-Visualization-TensorFlow_22_30.png │ ├── Network-Visualization-TensorFlow_22_31.png │ ├── Network-Visualization-TensorFlow_22_32.png │ ├── Network-Visualization-TensorFlow_22_33.png │ ├── Network-Visualization-TensorFlow_22_34.png │ ├── Network-Visualization-TensorFlow_22_35.png │ ├── Network-Visualization-TensorFlow_22_36.png │ ├── Network-Visualization-TensorFlow_22_37.png │ ├── Network-Visualization-TensorFlow_22_38.png │ ├── Network-Visualization-TensorFlow_22_39.png │ ├── Network-Visualization-TensorFlow_22_4.png │ ├── Network-Visualization-TensorFlow_22_40.png │ ├── Network-Visualization-TensorFlow_22_5.png │ ├── Network-Visualization-TensorFlow_22_6.png │ ├── Network-Visualization-TensorFlow_22_7.png │ ├── Network-Visualization-TensorFlow_22_8.png │ ├── Network-Visualization-TensorFlow_22_9.png │ ├── Network-Visualization-TensorFlow_24_1.png │ ├── Network-Visualization-TensorFlow_24_10.png │ ├── Network-Visualization-TensorFlow_24_11.png │ ├── Network-Visualization-TensorFlow_24_12.png │ ├── Network-Visualization-TensorFlow_24_13.png │ ├── Network-Visualization-TensorFlow_24_14.png │ ├── Network-Visualization-TensorFlow_24_15.png │ ├── Network-Visualization-TensorFlow_24_16.png │ ├── Network-Visualization-TensorFlow_24_17.png │ ├── Network-Visualization-TensorFlow_24_18.png │ ├── Network-Visualization-TensorFlow_24_19.png │ ├── Network-Visualization-TensorFlow_24_2.png │ ├── Network-Visualization-TensorFlow_24_20.png │ ├── Network-Visualization-TensorFlow_24_21.png │ ├── Network-Visualization-TensorFlow_24_22.png │ ├── Network-Visualization-TensorFlow_24_23.png │ ├── Network-Visualization-TensorFlow_24_24.png │ ├── Network-Visualization-TensorFlow_24_25.png │ ├── 
Network-Visualization-TensorFlow_24_26.png │ ├── Network-Visualization-TensorFlow_24_27.png │ ├── Network-Visualization-TensorFlow_24_28.png │ ├── Network-Visualization-TensorFlow_24_29.png │ ├── Network-Visualization-TensorFlow_24_3.png │ ├── Network-Visualization-TensorFlow_24_30.png │ ├── Network-Visualization-TensorFlow_24_31.png │ ├── Network-Visualization-TensorFlow_24_32.png │ ├── Network-Visualization-TensorFlow_24_33.png │ ├── Network-Visualization-TensorFlow_24_34.png │ ├── Network-Visualization-TensorFlow_24_35.png │ ├── Network-Visualization-TensorFlow_24_36.png │ ├── Network-Visualization-TensorFlow_24_37.png │ ├── Network-Visualization-TensorFlow_24_38.png │ ├── Network-Visualization-TensorFlow_24_39.png │ ├── Network-Visualization-TensorFlow_24_4.png │ ├── Network-Visualization-TensorFlow_24_40.png │ ├── Network-Visualization-TensorFlow_24_41.png │ ├── Network-Visualization-TensorFlow_24_5.png │ ├── Network-Visualization-TensorFlow_24_6.png │ ├── Network-Visualization-TensorFlow_24_7.png │ ├── Network-Visualization-TensorFlow_24_8.png │ ├── Network-Visualization-TensorFlow_24_9.png │ └── Network-Visualization-TensorFlow_7_0.png ├── Synthesizing-Celebs-BEGAN_files │ └── Synthesizing-Celebs-BEGAN_29_0.png ├── azure-deep-learning.epub ├── azure-deep-learning.pdf ├── azure-deep-learning.tex ├── fashion-mnist.html ├── fooling-images.html ├── how-to-make-a-racist-ai-without-really-trying.html ├── how-to-make-a-racist-ai-without-really-trying_files │ ├── how-to-make-a-racist-ai-without-really-trying_50_1.png │ ├── how-to-make-a-racist-ai-without-really-trying_52_0.png │ ├── how-to-make-a-racist-ai-without-really-trying_61_1.png │ └── how-to-make-a-racist-ai-without-really-trying_64_1.png ├── imgs │ └── azlogin.png ├── index.html ├── libs │ ├── gitbook-2.6.7 │ │ ├── css │ │ │ ├── fontawesome │ │ │ │ └── fontawesome-webfont.ttf │ │ │ ├── plugin-bookdown.css │ │ │ ├── plugin-fontsettings.css │ │ │ ├── plugin-highlight.css │ │ │ ├── plugin-search.css │ │ │ └── style.css │ │ └── js │ │ │ ├── app.min.js │ │ │ ├── jquery.highlight.js │ │ │ ├── lunr.js │ │ │ ├── plugin-bookdown.js │ │ │ ├── plugin-fontsettings.js │ │ │ ├── plugin-search.js │ │ │ └── plugin-sharing.js │ └── jquery-2.2.3 │ │ └── jquery.min.js ├── network-visualization-tensorflow.html ├── neural-style-transfer.html ├── output_10_0.png ├── output_10_1.png ├── output_11_0.png ├── output_11_2.png ├── output_12_2.png ├── output_13_0.png ├── output_13_1.png ├── output_13_2.png ├── output_5_0.png ├── output_9_0.png ├── output_9_1.png ├── pretrained-model.html ├── provisioning-linux-dsvms-with-azure-cli-2-0.html ├── saliency-maps.html ├── search_index.json ├── style.css ├── synthesizing-faces-of-celebrities-boundary-equilibrium-gan-with-celeba-data.html ├── transfer-learning-with-cntk.html ├── upgrading-cntk-and-cudnn.html └── wasserstein-gan-and-loss-sensitive-gan-with-cifar-data.html └── license.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # ignore etc dir 2 | etc/ 3 | 4 | # ignore cache 5 | __pycache__/ 6 | 7 | # ignore ipynb 8 | .ipynb_checkpoints/ 9 | 10 | # ignore pretrained models 11 | Models/ 12 | 13 | # ignore data downloads 14 | cifar-10-batches-py/ 15 | data/ 16 | datasets/ 17 | 18 | # ignore credentials 19 | creds.txt 20 | 21 | # don't push models 22 | *.model 23 | 24 | # Rproj files 25 | .Rproj.user/ 26 | *.Rproj 27 | .Rproj.user 28 | -------------------------------------------------------------------------------- /.nojekyll: 
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/.nojekyll -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | This project welcomes contributions and suggestions. Most contributions require you to agree to a 4 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us 5 | the rights to use your contribution. For details, visit https://cla.microsoft.com. 6 | 7 | When you submit a pull request, a CLA-bot will automatically determine whether you need to provide 8 | a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions 9 | provided by the bot. You will only need to do this once across all repos using our CLA. 10 | 11 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 12 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or 13 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 14 | 15 | # Legal Notices 16 | 17 | Microsoft and any contributors grant you a license to the Microsoft documentation and other content 18 | in this repository under the [Creative Commons Attribution 4.0 International Public License](https://creativecommons.org/licenses/by/4.0/legalcode), 19 | see the [LICENSE](LICENSE) file, and grant you a license to any code in the repository under the [MIT License](https://opensource.org/licenses/MIT), see the 20 | [LICENSE-CODE](LICENSE-CODE) file. 21 | 22 | Microsoft, Windows, Microsoft Azure and/or other Microsoft products and services referenced in the documentation 23 | may be either trademarks or registered trademarks of Microsoft in the United States and/or other countries. 24 | The licenses for this project do not grant you rights to use any Microsoft names, logos, or trademarks. 25 | Microsoft's general trademark guidelines can be found at http://go.microsoft.com/fwlink/?LinkID=254653. 26 | 27 | Privacy information can be found at https://privacy.microsoft.com/en-us/ 28 | 29 | Microsoft and any contributors reserve all other rights, whether under their respective copyrights, patents, 30 | or trademarks, whether by implication, estoppel or otherwise.
-------------------------------------------------------------------------------- /Instructors/Slides/CNTK.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Instructors/Slides/CNTK.pptx -------------------------------------------------------------------------------- /Instructors/Slides/Deep Learning.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Instructors/Slides/Deep Learning.pptx -------------------------------------------------------------------------------- /Instructors/Slides/Softmax.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Instructors/Slides/Softmax.pptx -------------------------------------------------------------------------------- /Instructors/Slides/Transfer Learning.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Instructors/Slides/Transfer Learning.pptx -------------------------------------------------------------------------------- /LICENSE-CODE.TXT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | Copyright (c) Microsoft Corporation. All rights reserved. 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and 5 | associated documentation files (the "Software"), to deal in the Software without restriction, 6 | including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, 8 | subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all copies or substantial 11 | portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT 14 | NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 15 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 16 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 17 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /NOTICE.TXT: -------------------------------------------------------------------------------- 1 | ##Legal Notices 2 | Microsoft and any contributors grant you a license to the Microsoft documentation and other content 3 | in this repository under the [Creative Commons Attribution 4.0 International Public License](https://creativecommons.org/licenses/by/4.0/legalcode), 4 | see the LICENSE file, and grant you a license to any code in the repository under the [MIT License](https://opensource.org/licenses/MIT), see the 5 | LICENSE-CODE file. 
6 | 7 | Microsoft, Windows, Microsoft Azure and/or other Microsoft products and services referenced in the documentation 8 | may be either trademarks or registered trademarks of Microsoft in the United States and/or other countries. 9 | The licenses for this project do not grant you rights to use any Microsoft names, logos, or trademarks. 10 | Microsoft's general trademark guidelines can be found at http://go.microsoft.com/fwlink/?LinkID=254653. 11 | 12 | Privacy information can be found at https://privacy.microsoft.com/ 13 | 14 | Microsoft and any contributors reserve all other rights, whether under their respective copyrights, patents, 15 | or trademarks, whether by implication, estoppel or otherwise. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Deep Learning on Azure 2 | ======================= 3 | 4 | > **NOTE** This content is no longer maintained. Visit the [Azure Machine Learning Notebook](https://github.com/Azure/MachineLearningNotebooks) project for sample Jupyter notebooks for ML and deep learning with Azure Machine Learning. 5 | 6 | This repository contains materials to help you learn about Deep Learning with the [Microsoft Cognitive Toolkit (CNTK)](https://github.com/microsoft/cntk) and Microsoft Azure. [Students](./Students) can find slides, tutorial notebooks, and scripts covering a variety of deep learning fundamentals and applications. These course assets will teach you how to implement convolutional networks, recurrent networks, and generative models and apply them to problems in computer vision, natural language processing, and reinforcement learning. The course materials pay particular attention to how to implement these algorithms most effectively using the resources provided by the Azure infrastructure, and to best practices when working with CNTK. 7 | 8 | ## Part I - Fundamentals and Azure for Machine Learning 9 | 10 | 1. Pretensions to Thinking and Learning - Overview of Machine Learning 11 | 1. A Minimal Introduction to AI, Representation Learning, and Deep Learning 12 | 1. Deploying and Accessing the Linux Data Science Virtual Machine 13 | 1. Computational Graphs, Symbolic Differentiation, and Auto-Differentiation 14 | 1. Overview of the Microsoft Cognitive Toolkit (`CNTK`) and Other Deep Learning Frameworks 15 | 1. Activation Functions and Network Architectures 16 | 1. Representational Power and Capacity 17 | 18 | ## Part II - Optimization 19 | 20 | 1. Backpropagation and Stochastic Optimization for Training Neural Networks 21 | 1. Momentum and Acceleration Methods 22 | 1. Regularization, Normalization, and Dropout 23 | 1. Distributed Training and Evaluation with Azure Batch AI 24 | 1. Practical Bayesian Optimization for Hyperparameter Search 25 | 1. Evolutionary Strategies for Parameter Search 26 | 27 | ## Part III - Convolutional Neural Networks 28 | 29 | 1. Scaling Networks to Images 30 | 1. Receptive Fields, Spatial Arrangements, Strides and Filters 31 | 1. Dilated Convolutions and Pooling 32 | 1. Skip Connections and Residual Networks 33 | 34 | ## Part IV - Recurrent Networks 35 | 36 | 1. Dense Word Vector Representations 37 | 1. Comparison of word2vec, GloVe, and `fasttext` 38 | 1. Recurrent Neural Networks and Language Models 39 | 1. GRUs, LSTMs, and Recursive Architectures 40 | 1. Vanishing and Exploding Gradients 41 | 1. Memory and Attention 42 | 43 | ## Part V - Reinforcement Learning 44 | 45 | 1. 
Optimal Control and Planning 46 | 1. Policy Gradients 47 | 1. Q-learning 48 | 1. Actor-Critic Methods 49 | 1. Evolutionary Strategies as an Alternative to Policy Methods 50 | 51 | ## Part VI - Generative Models 52 | 53 | 1. Visualizing and Understanding Neural Networks with Saliency Maps 54 | 1. Adversarial Attacks on Neural Networks 55 | 1. Metrics on Distributions for Implicit Generative Models 56 | 1. Generative Adversarial Networks 57 | 1. Belief Nets and Change of Variable Models 58 | 1. Approximate Methods using the Variational Autoencoder 59 | 60 | ## Part VII - Operationalization Methods 61 | 62 | 1. HDInsight, `pyspark` and `mmlspark` 63 | 1. Azure Batch Shipyard / Azure Batch Training 64 | 1. Azure container services 65 | 1. SQL Server 2017 66 | 1. The embedding learning library and web applications 67 | 68 | ## Useful Resources 69 | 70 | ### Online Courses 71 | 72 | + [Deep Learning Explained, edX 2017](https://courses.edx.org/courses/course-v1:Microsoft+DAT236x+2T2017/course/) 73 | * Online MOOC that covers the fundamentals of Deep Learning with the Microsoft Cognitive Toolkit 74 | * Consists of 7 modules 75 | * Released in June 2017 76 | + [deeplearning.ai - Coursera Specialization Taught by Andrew Ng](https://www.deeplearning.ai/) 77 | * Specialization consisting of 5 MOOCs on Deep Learning taught by Andrew Ng 78 | * Taught using TensorFlow 79 | + [fastAI](http://www.fast.ai/) 80 | * 2 Deep Learning courses taught by Jeremy Howard and Rachel Thomas at USF 81 | + [CS231n - Convolutional Networks for Visual Recognition](http://cs231n.stanford.edu/) 82 | + [CS224n - Natural Language Processing with Deep Learning](http://web.stanford.edu/class/cs224n/) 83 | 84 | ### Online Books and Blogs 85 | 86 | + [Neural Networks and Deep Learning - Michael Nielsen](http://neuralnetworksanddeeplearning.com/) 87 | + [Deep Learning - Ian Goodfellow, Yoshua Bengio & Aaron Courville](http://www.deeplearningbook.org/) 88 | + [Chris Olah's Blog](http://colah.github.io/) 89 | + [Distill Publications](https://distill.pub/) 90 | + [Andrej Karpathy's Blog](http://karpathy.github.io/) 91 | + [Denny Britz's Blog](http://www.wildml.com/) 92 | + [Edwin Chen's Blog](http://blog.echen.me/) 93 | + [Off the Convex Path - Joint blog by Sanjeev Arora, Moritz Hardt & Nisheeth Vishnoi](http://www.offconvex.org/about/) 94 | + [BAIR - Berkeley AI Research Blog](http://bair.berkeley.edu/blog/) 95 | -------------------------------------------------------------------------------- /Students/1-start/dsvm/dsvm-for-dl.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/1-start/dsvm/dsvm-for-dl.pptx -------------------------------------------------------------------------------- /Students/1-start/dsvm/lab/0-dsvm-deploy-script/0-provision-dsvm.md: -------------------------------------------------------------------------------- 1 | # Provisioning Linux DSVMs with Azure CLI 2.0 2 | 3 | **What You'll Need** 4 | 5 | + An Azure Subscription 6 | + Bash 7 | * [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/features) 8 | - The Azure cloud shell provides a complete bash (and PowerShell) environment. 
The shell is automatically authenticated with Azure CLI 2.0 so you can provision and run all the scripts listed below using this shell. 9 | * [Windows Subsystem for Linux](https://msdn.microsoft.com/en-us/commandline/wsl/install) 10 | - In addition to WSL, you'll need to install Azure CLI 2.0. See the instructions below or refer to the [Azure CLI 2.0 documentation](https://docs.microsoft.com/en-us/cli/azure/overview). 11 | 12 | In this lab you will provision your own **Linux Data Science Virtual Machine (DSVM)**. The DSVM is a virtual machine that contains a large number of data science and machine learning tools preinstalled, including deep learning libraries like CNTK, TensorFlow, Caffe2, PyTorch, Keras, MXNet, and a lot more. 13 | 14 | In this course, we will specifically use the [Ubuntu version](https://docs.microsoft.com/en-us/azure/machine-learning/machine-learning-data-science-dsvm-ubuntu-intro) of the DSVM, since it is the most common distribution for machine learning and data science. Other operating system variants of the DSVM are available in [CentOS](https://docs.microsoft.com/en-us/azure/machine-learning/machine-learning-data-science-linux-dsvm-intro) and [Windows](https://docs.microsoft.com/en-us/azure/machine-learning/machine-learning-data-science-provision-vm) (2012 and 2016 Server Editions). 15 | 16 | You can provision, start, and stop the Linux DSVM from the Azure portal itself. We recommend you take a look at the [Azure portal](https://portal.azure.com/) and learn the interface. However, in this course we will introduce you to the [Azure CLI 2.0](https://docs.microsoft.com/en-us/cli/azure/overview), which is a command-line interface built in Python and available on macOS, Linux, and Windows. Learning a bit about the CLI can greatly improve your productivity with Azure resources, and will allow you to incorporate configuration and management scripts into your daily workflow. For Windows 10 users, I highly recommend you install the [Windows Subsystem for Linux](https://msdn.microsoft.com/en-us/commandline/wsl/install_guide) to get a complete `bash` environment on your system. In addition, I use [ConEmu](https://conemu.github.io/) as my main console, which has a number of nice features, including tabbed support for multiple consoles such as Ubuntu Bash, PowerShell, and Git Bash. 17 | 18 | ## Installing and Testing the Azure CLI 19 | 20 | Follow the instructions on the [Azure CLI 2.0 webpage](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) for the latest information on how to install the CLI. Once you have installed the Azure CLI, and added it to your `$PATH`, you should be able to call it from your terminal. 21 | 22 | **NOTE** _If you're using the Azure Cloud Shell, you don't need to install Azure CLI 2.0 or authenticate. All that is already done for you._ Skip to the section [Deploying Via Custom Script](#deploying-via-custom-script). 23 | 24 | ```bash 25 | alizaidi@MININT-C510VH5:~$ which az 26 | /home/alizaidi/anaconda3/bin/az 27 | ``` 28 | 29 | ## Login to Your Azure Account 30 | 31 | In order to use the Azure CLI 2.0 to manage and create resources in Azure, you'll need to log in to your Azure account: 32 | 33 | ```bash 34 | alizaidi@MININT-C510VH5:~$ az login 35 | ``` 36 | 37 | This will prompt you to open a browser and activate your session: 38 | 39 | ```bash 40 | alizaidi@MININT-C510VH5:~$ az login 41 | To sign in, use a web browser to open the page https://aka.ms/devicelogin and enter the code FA5JZBPGA to authenticate. 
42 | ``` 43 | 44 | ![](imgs/azlogin.png) 45 | 46 | Once you're logged into your account, you can list all the Azure subscriptions associated with your account by running `az account list`, and show the one you're currently defaulting to by running `az account show`: 47 | 48 | ```bash 49 | alizaidi@MININT-C510VH5:~$ az account show 50 | { 51 | "environmentName": "AzureCloud", 52 | "id": "please-dont-steal-my-acount", 53 | "isDefault": true, 54 | "name": "Not for you", 55 | "state": "Enabled", 56 | "tenantId": "nah", 57 | "user": { 58 | "name": "alizaidi@microsoft.com", 59 | "type": "user" 60 | } 61 | } 62 | alizaidi@MININT-C510VH5:~$ az account list 63 | [ 64 | { 65 | "cloudName": "AzureCloud", 66 | "id": "please-dont-steal-my-acount", 67 | "isDefault": true, 68 | "name": "Not for you", 69 | "state": "Enabled", 70 | "tenantId": "nah", 71 | "user": { 72 | "name": "alizaidi@microsoft.com", 73 | "type": "user" 74 | } 75 | } 76 | ] 77 | ``` 78 | 79 | You can use the option `--output table` to print the output in a tabular format. 80 | 81 | 82 | ## Deploying Via Custom Script 83 | 84 | Rather than doing this manually, I have created a custom script that will create the DSVM for you, and also apply some configuration settings to your VM's network to allow for easier access. 85 | 86 | You can simply deploy the DSVM by navigating to the `labs/0-dsvm-deploy-script` directory and running 87 | 88 | ```bash 89 | alizaidi:$ ./deploydsvm.sh 90 | ``` 91 | 92 | The default parameters will use your bash username as your username for the VM, and a simple password. Feel free to change these by specifying your own values as arguments. This will create your virtual machine, open up all the necessary ports on your VM's network security group, and save the credentials in a text file `creds.txt`. 93 | 94 | ## Updating DSVM OS Disk Size 95 | 96 | By default, your primary partition on the DSVM is only 50 GB. Fortunately, expanding it is pretty easy. Let's again use two helper scripts I created to make this process trivially easy. Using the `$VMNAME` and `$RG` values saved in the file `creds.txt`, fill in the following command: 97 | 98 | ```bash 99 | expand-osdisk.sh "os-size-in-GB" "RG" "VMNAME" 100 | ``` 101 | 102 | For example, I'd run 103 | 104 | ```bash 105 | expand-osdisk.sh 200 azaididlclass azaididsvm 106 | ``` 107 | 108 | Now you're ready to log into your VM and have some fun! 109 | 110 | ## Deploying Manually **(Only Proceed if You Didn't Use the Script Above!)** 111 | 112 | If you didn't use the scripts above, I've written out the instructions manually below. 113 | 114 | 115 | ### Create a New Resource Group 116 | 117 | Resource groups are a convenient way of consolidating related resources together. This is particularly handy when you have a project that will require a variety of Azure resources and you'd like to see them all in one place. 118 | 119 | Please make sure your resource group is in the "East US" region (you could potentially use South Central US). 120 | 121 | In this example, I'll create a resource group called `azteachdl` in `eastus`: 122 | 123 | ```bash 124 | alizaidi:$ az group create -n azteachdl -l eastus 125 | { 126 | "id": "/subscriptions/stay-away-from me/resourceGroups/azteachdl", 127 | "location": "eastus", 128 | "managedBy": null, 129 | "name": "azteachdl", 130 | "properties": { 131 | "provisioningState": "Succeeded" 132 | }, 133 | "tags": null 134 | } 135 | ``` 136 | 137 | ### Create Your DSVM 138 | 139 | Now let's create the Linux DSVM.
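Before running `az vm create`, you can optionally confirm the DSVM image URN. The query below is the same one left as a comment in `deploydsvm.sh`; it is illustrative and can take a minute or two to return:

```bash
az vm image list --all --output table --location eastus --publisher microsoft-ads
```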
Edit the parameters below with your configurations. In particular, you'll need to specify your own `resource-group` name, a name for the data science virtual machine, and your username. 140 | 141 | ```bash 142 | az vm create \ 143 | --resource-group azteachdl \ 144 | --name azdsvmclass \ 145 | --admin-username alizaidi \ 146 | --public-ip-address-dns-name algoclass \ 147 | --image microsoft-ads:linux-data-science-vm-ubuntu:linuxdsvmubuntu:latest \ 148 | --size Standard_NC6 \ 149 | --generate-ssh-keys 150 | ``` 151 | 152 | While the resources are being deployed, you will see a _"Running"_ message displayed in your terminal. Upon completion, you should see JSON output with information about your resources: 153 | 154 | ```bash 155 | { 156 | "fqdns": "", 157 | "id": "/subscriptions/keep-away/resourceGroups/azaididlclass/providers/Microsoft.Compute/virtualMachines/azaidi", 158 | "location": "eastus", 159 | "macAddress": "00-0D-3A-1B-59-48", 160 | "powerState": "VM running", 161 | "privateIpAddress": "10.0.0.4", 162 | "publicIpAddress": "13.00.000.000", 163 | "resourceGroup": "azaididlclass" 164 | } 165 | ``` 166 | 167 | ### Create a Password for the User 168 | 169 | In the scripted solution, the authentication is done through a password. 170 | 171 | In the manual setup we showed how you could create a virtual machine using the `generate-ssh-keys` option, which authenticates using SSH keys; by default these are saved as a pair of private and public keys in `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`. In order to access certain web applications like Jupyter, we'll need a password for our user. 172 | 173 | To create a password for the user, run the following: 174 | 175 | ```bash 176 | sudo passwd $USERNAME 177 | ``` 178 | 179 | where `$USERNAME` is the username you used to create the VM. 180 | 181 | You can now navigate to the portal and check for your resources. 182 | 183 | -------------------------------------------------------------------------------- /Students/1-start/dsvm/lab/0-dsvm-deploy-script/deploydsvm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #title :deploydsvm.sh 3 | #description :This script creates a Linux DSVM in Azure. 4 | #author :Ali Zaidi (github: akzaidi; contact alizaidi at microsoft dot com) 5 | #date :2017-08-04 6 | #version :0.2 7 | #usage :bash deploydsvm.sh "username" "password" "resource-group" "location" "vmname" "sshadmin" "dns" 8 | #notes :Requires the Azure CLI, and you must log in prior to usage: az login.
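# example (illustrative values; all seven arguments are optional and fall back to the defaults defined below):
#   bash deploydsvm.sh alice 'MyStr0ngPass!' alice-dlclass eastus alicedsvm alice alicedsvm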
9 | #==================================================================================== 10 | 11 | # defining parameters to use in deployment 12 | # uses username on bash profile and concatenates with resources 13 | 14 | yourname=$(whoami) 15 | username=${1:-$yourname} 16 | class="dlclass" 17 | vmsuffix="dsvm" 18 | 19 | ARG2=${2:-"Password123!"} 20 | PASSWORD=$ARG2 21 | 22 | ARG3=${3:-$username$class} 23 | RG=$ARG3 24 | 25 | # available regions: 26 | # https://azure.microsoft.com/en-us/regions/services/ 27 | ARG4=${4:-eastus} 28 | LOC=$ARG4 29 | 30 | ARG5=${5:-$username$vmsuffix} 31 | VMNAME=$ARG5 32 | 33 | ARG6=${6:-$username} 34 | SSHADMIN=$ARG6 35 | 36 | ARG7=${7:-$VMNAME} 37 | DNS=$ARG7 38 | 39 | nsgp="NSG" 40 | NSG=$VMNAME$nsgp 41 | 42 | # Create Resource Group 43 | 44 | az group create -n "$RG" -l "$LOC" 45 | 46 | # Create DSVM 47 | 48 | az vm create \ 49 | --resource-group "$RG" \ 50 | --name "$VMNAME" \ 51 | --admin-username "$SSHADMIN" \ 52 | --public-ip-address-dns-name "$DNS" \ 53 | --image microsoft-ads:linux-data-science-vm-ubuntu:linuxdsvmubuntu:latest \ 54 | --size Standard_NC6 \ 55 | --admin-password "$PASSWORD" 56 | 57 | # verify image SKU by searching dsvm skus 58 | # az vm image list --all --output table --location eastus --publisher microsoft-ads 59 | 60 | 61 | # Open Port 8000 for JupyterHub 62 | 63 | az network nsg rule create \ 64 | --resource-group "$RG" \ 65 | --nsg-name "$NSG" \ 66 | --name JupyterHub \ 67 | --protocol tcp \ 68 | --priority 1001 \ 69 | --destination-port-range 8000 70 | 71 | # Open Port 8888 for JupyterLab 72 | 73 | az network nsg rule create \ 74 | --resource-group "$RG" \ 75 | --nsg-name "$NSG" \ 76 | --name JupyterLab \ 77 | --protocol tcp \ 78 | --priority 1002 \ 79 | --destination-port-range 8888 80 | 81 | # Open Port 6006 for TensorBoard 82 | 83 | az network nsg rule create \ 84 | --resource-group "$RG" \ 85 | --nsg-name "$NSG" \ 86 | --name tensorboard \ 87 | --protocol tcp \ 88 | --priority 1003 \ 89 | --destination-port-range 6006 90 | 91 | # Open Port 8787 for RStudio-Server 92 | 93 | az network nsg rule create \ 94 | --resource-group "$RG" \ 95 | --nsg-name "$NSG" \ 96 | --name rstudio-server \ 97 | --protocol tcp \ 98 | --priority 1004 \ 99 | --destination-port-range 8787 100 | 101 | # save credentials to text file 102 | 103 | printf "Saving credentials to creds.txt\n" 104 | 105 | echo "VM Name = " $VMNAME >> creds.txt 106 | echo "Username = " $SSHADMIN >> creds.txt 107 | echo "Password = " $PASSWORD >> creds.txt 108 | echo "DNS Name = " $DNS.$LOC.cloudapp.azure.com >> creds.txt 109 | echo "Network Security Group = " $NSG >> creds.txt 110 | echo "Resource Group = " $RG >> creds.txt 111 | -------------------------------------------------------------------------------- /Students/1-start/dsvm/lab/0-dsvm-deploy-script/imgs/azlogin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/1-start/dsvm/lab/0-dsvm-deploy-script/imgs/azlogin.png -------------------------------------------------------------------------------- /Students/1-start/dsvm/lab/1-update-disk-size-script/expand-osdisk.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #title :expand-osdisk.sh 3 | #description :This script updates the OS disk size on a Linux DSVM in Azure. 4 | #author :Ali Zaidi (github: akzaidi; contact alizaidi at microsoft dot com) 5 | #date :2017-08-04 6 | #version :0.1 7 | #usage :bash expand-osdisk.sh "os-size-in-GB" "resource-group" "vmname" 8 | #notes :Requires the Azure CLI, and you must log in prior to usage: az login. 9 | #====================================================================================
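# example (illustrative; matches the invocation shown in the provisioning lab):
#   bash expand-osdisk.sh 200 azaididlclass azaididsvm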
10 | 11 | OSSIZE=${1:-200} 12 | RG=$2 13 | VMNAME=$3 14 | 15 | # Get Disk Name 16 | read OSDISK disksize <<< $(./get-osdisk-name.sh $RG | grep "Os") 17 | 18 | # Deallocate so resizing can be done 19 | 20 | az vm deallocate \ 21 | --resource-group "$RG" \ 22 | --name "$VMNAME" 23 | 24 | # Update OS Disk 25 | 26 | az disk update \ 27 | --resource-group "$RG" \ 28 | --name "$OSDISK" \ 29 | --size-gb "$OSSIZE" 30 | 31 | # Restart VM 32 | 33 | az vm start \ 34 | --resource-group "$RG" \ 35 | --name "$VMNAME" 36 | -------------------------------------------------------------------------------- /Students/1-start/dsvm/lab/1-update-disk-size-script/get-osdisk-name.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | yourname=$(whoami) 4 | class="dlclass" 5 | vmname="dsvm" 6 | 7 | ARG1=${1:-$yourname$class} 8 | RG=$ARG1 9 | 10 | VMNAME=${2:-$yourname$vmname} 11 | 12 | # List managed disks on VM 13 | 14 | az disk list \ 15 | --resource-group "$RG" \ 16 | --query '[*].{Name:name,Gb:diskSizeGb,Tier:accountType}' \ 17 | --output table 18 | -------------------------------------------------------------------------------- /Students/1-start/dsvm/lab/2-updating-CNTK/1-Upgrading-CNTK.md: -------------------------------------------------------------------------------- 1 | # Upgrading CNTK and CUDNN 2 | 3 | In this lab, we'll upgrade from CNTK version 2.0 to CNTK 2.2. 4 | 5 | ## Updating CNTK 6 | 7 | CNTK is available in a variety of [precompiled binaries](https://docs.microsoft.com/en-us/cognitive-toolkit/setup-cntk-on-your-machine), which you can install using the `pip` installer. 8 | 9 | ## Updating CNTK With a Single Script 10 | 11 | I've created a single script to make the upgrade process a lot simpler: 12 | 13 | ```bash 14 | ./cntk-install.sh 15 | ``` 16 | 17 | This will create a new `conda` environment called `cntk-py35` with your new CNTK installation. 18 | 19 | ### Launch JupyterLab 20 | 21 | JupyterLab is an updated environment based on Jupyter. In addition to the notebooks popularized by Jupyter, JupyterLab has an inspector for viewing help files quickly, a terminal, and a file browser. This makes for a more complete interactive development experience! 22 | 23 | To launch JupyterLab and keep it running, I recommend first creating a password you can use to log into your server without having to use special tokens (see the documentation [here](http://nbconvert.readthedocs.io/en/latest/usage.html)). Moreover, since we want to keep JupyterLab running in the background even after we close out our terminal session, I'd recommend using [tmux](https://github.com/tmux/tmux/wiki) to launch your Jupyter session. 24 | 25 | 26 | ```bash 27 | 28 | tmux new -s jupyterlab 29 | jupyter notebook --generate-config 30 | jupyter notebook password 31 | jupyter lab --ip="*" 32 | tmux detach 33 | jupyter notebook list 34 | ``` 35 | 36 | 37 | Now navigate to your VM's DNS name on port `8888` to interact with JupyterLab! 38 | 39 | ## Manually Updating CNTK and Launching Jupyter (**No Need to Do This if You Use the Script**) 40 | 41 | Only follow the steps below if the script above didn't work or if you like doing this manually...
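Whether you use the script or the manual steps, a quick sanity check afterwards is to print the CNTK version from the new environment. A minimal sketch, assuming the `cntk-py35` environment created above (`cntk.__version__` is the package's standard version attribute):

```bash
source activate cntk-py35
python -c "import cntk; print(cntk.__version__)"  # expect 2.2 after the upgrade
```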
42 | 43 | ### Create Conda Virtual Environment 44 | 45 | For this tutorial, we'll use the [`conda` virtual environment manager](https://conda.io/docs/using/envs.html) to create and modify Python virtual environments. You can create a Python 3.5 environment with `conda` by using the `conda create` command: 46 | 47 | 48 | ```bash 49 | conda create -n cntk-py35 python=3.5 anaconda 50 | ``` 51 | 52 | The environment will be named `cntk-py35` and the additional flag `anaconda` ensures that the distribution will install over 100 prebuilt Python packages for scientific computing (list [here](https://docs.continuum.io/anaconda/packages/pkg-docs)). 53 | 54 | ### Install CNTK Using `pip` Binary Wheels 55 | 56 | We can activate that environment by running 57 | 58 | ```bash 59 | source activate cntk-py35 60 | ``` 61 | 62 | Now that we are in our virtual environment for CNTK, let's install the [appropriate Python binary](https://docs.microsoft.com/en-us/cognitive-toolkit/setup-linux-python?tabs=cntkpy21) using a Python "wheel". For example, here are the installation instructions for CNTK in a Python 3.5 environment on an Ubuntu 16.04 system with GPU support: 63 | 64 | ```bash 65 | pip install https://cntk.ai/PythonWheel/GPU/cntk-2.2-cp35-cp35m-linux_x86_64.whl 66 | ``` 67 | 68 | ### Temporary Fixes 69 | 70 | Let's take complete ownership of our `home` and `anaconda` directories: 71 | 72 | ```bash 73 | sudo chown alizaidi:alizaidi -R /home/alizaidi 74 | source deactivate 75 | sudo chown alizaidi:alizaidi -R /anaconda/ 76 | pip install -U pip 77 | ``` 78 | 79 | **IMPORTANT** replace `alizaidi` with the username you used to create the DSVM. 80 | 81 | Update `ipython` and related packages: 82 | 83 | ```bash 84 | pip install --upgrade --force-reinstall jupyter 85 | ``` 86 | 87 | Remove `az_ml_magic` from ipython startup: 88 | 89 | ```bash 90 | rm ~/.ipython/profile_default/startup/az_ml_magic.py 91 | ``` 92 | 93 | ### Conda Extensions 94 | 95 | Since we created a new environment, let's also install some extensions that will make it easier to find that environment from JupyterHub. 96 | 97 | ```bash 98 | conda install nb_conda 99 | conda install ipykernel 100 | python -m ipykernel install --user --name cntk-py35 --display-name "cntk-py35" 101 | jupyter kernelspec list 102 | ``` 103 | 104 | ### Install Keras 105 | 106 | Let's also install Keras in this specific Python environment: 107 | 108 | ```bash 109 | source activate cntk-py35 110 | pip install keras 111 | ```
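By default Keras will not pick up CNTK; you have to point it at the CNTK backend. A minimal sketch, assuming the standard `KERAS_BACKEND` mechanism (you can also set `"backend": "cntk"` in `~/.keras/keras.json`):

```bash
export KERAS_BACKEND=cntk
python -c "import keras; print(keras.backend.backend())"  # should print: cntk
```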
-------------------------------------------------------------------------------- /Students/1-start/dsvm/lab/2-updating-CNTK/cntk-install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # create conda environment and install cntk 4 | conda create -y -n cntk-py35 python=3.5 anaconda 5 | source activate cntk-py35 6 | yes | pip install https://cntk.ai/PythonWheel/GPU/cntk-2.2-cp35-cp35m-linux_x86_64.whl 7 | 8 | # take ownership of root anaconda 9 | sudo chown $USER:$USER -R /anaconda/ 10 | 11 | # update root anaconda python and jupyter 12 | source deactivate 13 | yes | pip install -U pip 14 | yes | pip install --upgrade --force-reinstall jupyter 15 | rm ~/.ipython/profile_default/startup/az_ml_magic.py 16 | 17 | # update conda kernels and add cntk-py35 spec 18 | conda install -y nb_conda 19 | conda install -y ipykernel 20 | python -m ipykernel install --user --name cntk-py35 --display-name "cntk-py35" 21 | jupyter kernelspec list 22 | 23 | # install keras and tensorflow-gpu 24 | source activate cntk-py35 25 | yes | pip install --upgrade --force-reinstall jupyter # the previous was to update jupyter on root, this will fix the ipython cli 26 | yes | pip install --upgrade --force-reinstall jupyter # seems redundant, but for some reason the first fails... 27 | yes | pip install keras 28 | yes | pip install tensorflow-gpu 29 | yes | pip install opencv-python easydict future pydot-ng 30 | conda install -y libgcc 31 | 32 | # update cudnn 33 | wget https://alizaidi.blob.core.windows.net/training/cuda_builds/CUDA/libcudnn6_6.0.21-1%2Bcuda8.0_amd64.deb 34 | sudo dpkg -i libcudnn6_6.0.21-1+cuda8.0_amd64.deb 35 | rm libcudnn6_6.0.21-1+cuda8.0_amd64.deb 36 | python -m ipykernel install --user --name cntk-py35 --display-name "cntk-py35" # TODO: check where this should be done 37 | sudo systemctl restart jupyterhub 38 | 39 | # start jupyterlab 40 | # cd ~/notebooks 41 | # tmux new -s jupyterlab 42 | # nohup jupyter lab --ip="*" & disown 43 | # tmux detach 44 | # jupyter notebook list 45 | -------------------------------------------------------------------------------- /Students/1-start/dsvm/lab/3-delete-script/deallocate-vm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # deallocate (stop) virtual machine from incurring charges 4 | 5 | rg=$1 6 | vmname=$2 7 | 8 | az vm deallocate --name $vmname \ 9 | --resource-group $rg 10 | -------------------------------------------------------------------------------- /Students/1-start/dsvm/lab/3-delete-script/delete-rg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | rgname=$1 4 | 5 | printf "Deleting resource group %s\n" "$rgname" 6 | az group delete --name $rgname --yes 7 | -------------------------------------------------------------------------------- /Students/1-start/intro.pptx: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/1-start/intro.pptx -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/.gitignore: -------------------------------------------------------------------------------- 1 | build/* 2 | im2col_cython.c 3 | im2col_cython.so 4 | -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/10-adversarial-attacks/tensorflow/utils/__init__.py -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/classifiers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/10-adversarial-attacks/tensorflow/utils/classifiers/__init__.py -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/classifiers/squeezenet.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | NUM_CLASSES = 1000 4 | 5 | def fire_module(x,inp,sp,e11p,e33p): 6 | with tf.variable_scope("fire"): 7 | with tf.variable_scope("squeeze"): 8 | W = tf.get_variable("weights",shape=[1,1,inp,sp]) 9 | b = tf.get_variable("bias",shape=[sp]) 10 | s = tf.nn.conv2d(x,W,[1,1,1,1],"VALID")+b 11 | s = tf.nn.relu(s) 12 | with tf.variable_scope("e11"): 13 | W = tf.get_variable("weights",shape=[1,1,sp,e11p]) 14 | b = tf.get_variable("bias",shape=[e11p]) 15 | e11 = tf.nn.conv2d(s,W,[1,1,1,1],"VALID")+b 16 | e11 = tf.nn.relu(e11) 17 | with tf.variable_scope("e33"): 18 | W = tf.get_variable("weights",shape=[3,3,sp,e33p]) 19 | b = tf.get_variable("bias",shape=[e33p]) 20 | e33 = tf.nn.conv2d(s,W,[1,1,1,1],"SAME")+b 21 | e33 = tf.nn.relu(e33) 22 | return tf.concat([e11,e33],3) 23 | 24 | 25 | class SqueezeNet(object): 26 | def extract_features(self, input=None, reuse=True): 27 | if input is None: 28 | input = self.image 29 | x = input 30 | layers = [] 31 | with tf.variable_scope('features', reuse=reuse): 32 | with tf.variable_scope('layer0'): 33 | W = tf.get_variable("weights",shape=[3,3,3,64]) 34 | b = tf.get_variable("bias",shape=[64]) 35 | x = tf.nn.conv2d(x,W,[1,2,2,1],"VALID") 36 | x = tf.nn.bias_add(x,b) 37 | layers.append(x) 38 | with tf.variable_scope('layer1'): 39 | x = tf.nn.relu(x) 40 | layers.append(x) 41 | with tf.variable_scope('layer2'): 42 | x = tf.nn.max_pool(x,[1,3,3,1],strides=[1,2,2,1],padding='VALID') 43 | layers.append(x) 44 | with tf.variable_scope('layer3'): 45 | x = fire_module(x,64,16,64,64) 46 | layers.append(x) 47 | with tf.variable_scope('layer4'): 48 | x = fire_module(x,128,16,64,64) 49 | layers.append(x) 50 | with tf.variable_scope('layer5'): 51 | x = tf.nn.max_pool(x,[1,3,3,1],strides=[1,2,2,1],padding='VALID') 52 | layers.append(x) 53 | with tf.variable_scope('layer6'): 54 | x = fire_module(x,128,32,128,128) 55 | layers.append(x) 56 | with tf.variable_scope('layer7'): 57 | x = fire_module(x,256,32,128,128) 58 | layers.append(x) 59 | with 
tf.variable_scope('layer8'): 60 | x = tf.nn.max_pool(x,[1,3,3,1],strides=[1,2,2,1],padding='VALID') 61 | layers.append(x) 62 | with tf.variable_scope('layer9'): 63 | x = fire_module(x,256,48,192,192) 64 | layers.append(x) 65 | with tf.variable_scope('layer10'): 66 | x = fire_module(x,384,48,192,192) 67 | layers.append(x) 68 | with tf.variable_scope('layer11'): 69 | x = fire_module(x,384,64,256,256) 70 | layers.append(x) 71 | with tf.variable_scope('layer12'): 72 | x = fire_module(x,512,64,256,256) 73 | layers.append(x) 74 | return layers 75 | 76 | def __init__(self, save_path=None, sess=None): 77 | """Create a SqueezeNet model. 78 | Inputs: 79 | - save_path: path to TensorFlow checkpoint 80 | - sess: TensorFlow session 81 | - input: optional input to the model. If None, will use placeholder for input. 82 | """ 83 | self.image = tf.placeholder('float',shape=[None,None,None,3],name='input_image') 84 | self.labels = tf.placeholder('int32', shape=[None], name='labels') 85 | self.layers = [] 86 | x = self.image 87 | self.layers = self.extract_features(x, reuse=False) 88 | self.features = self.layers[-1] 89 | with tf.variable_scope('classifier'): 90 | with tf.variable_scope('layer0'): 91 | x = self.features 92 | self.layers.append(x) 93 | with tf.variable_scope('layer1'): 94 | W = tf.get_variable("weights",shape=[1,1,512,1000]) 95 | b = tf.get_variable("bias",shape=[1000]) 96 | x = tf.nn.conv2d(x,W,[1,1,1,1],"VALID") 97 | x = tf.nn.bias_add(x,b) 98 | self.layers.append(x) 99 | with tf.variable_scope('layer2'): 100 | x = tf.nn.relu(x) 101 | self.layers.append(x) 102 | with tf.variable_scope('layer3'): 103 | x = tf.nn.avg_pool(x,[1,13,13,1],strides=[1,13,13,1],padding='VALID') 104 | self.layers.append(x) 105 | self.classifier = tf.reshape(x,[-1, NUM_CLASSES]) 106 | 107 | if save_path is not None: 108 | saver = tf.train.Saver() 109 | saver.restore(sess, save_path) 110 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(self.labels, NUM_CLASSES), logits=self.classifier)) 111 | -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/coco_utils.py: -------------------------------------------------------------------------------- 1 | from builtins import range 2 | import os, json 3 | import numpy as np 4 | import h5py 5 | 6 | BASE_DIR = 'cs231n/datasets/coco_captioning' 7 | 8 | def load_coco_data(base_dir=BASE_DIR, 9 | max_train=None, 10 | pca_features=True): 11 | data = {} 12 | caption_file = os.path.join(base_dir, 'coco2014_captions.h5') 13 | with h5py.File(caption_file, 'r') as f: 14 | for k, v in f.items(): 15 | data[k] = np.asarray(v) 16 | 17 | if pca_features: 18 | train_feat_file = os.path.join(base_dir, 'train2014_vgg16_fc7_pca.h5') 19 | else: 20 | train_feat_file = os.path.join(base_dir, 'train2014_vgg16_fc7.h5') 21 | with h5py.File(train_feat_file, 'r') as f: 22 | data['train_features'] = np.asarray(f['features']) 23 | 24 | if pca_features: 25 | val_feat_file = os.path.join(base_dir, 'val2014_vgg16_fc7_pca.h5') 26 | else: 27 | val_feat_file = os.path.join(base_dir, 'val2014_vgg16_fc7.h5') 28 | with h5py.File(val_feat_file, 'r') as f: 29 | data['val_features'] = np.asarray(f['features']) 30 | 31 | dict_file = os.path.join(base_dir, 'coco2014_vocab.json') 32 | with open(dict_file, 'r') as f: 33 | dict_data = json.load(f) 34 | for k, v in dict_data.items(): 35 | data[k] = v 36 | 37 | train_url_file = os.path.join(base_dir, 'train2014_urls.txt') 38 | with open(train_url_file, 'r') as f: 39 
| train_urls = np.asarray([line.strip() for line in f]) 40 | data['train_urls'] = train_urls 41 | 42 | val_url_file = os.path.join(base_dir, 'val2014_urls.txt') 43 | with open(val_url_file, 'r') as f: 44 | val_urls = np.asarray([line.strip() for line in f]) 45 | data['val_urls'] = val_urls 46 | 47 | # Maybe subsample the training data 48 | if max_train is not None: 49 | num_train = data['train_captions'].shape[0] 50 | mask = np.random.randint(num_train, size=max_train) 51 | data['train_captions'] = data['train_captions'][mask] 52 | data['train_image_idxs'] = data['train_image_idxs'][mask] 53 | 54 | return data 55 | 56 | 57 | def decode_captions(captions, idx_to_word): 58 | singleton = False 59 | if captions.ndim == 1: 60 | singleton = True 61 | captions = captions[None] 62 | decoded = [] 63 | N, T = captions.shape 64 | for i in range(N): 65 | words = [] 66 | for t in range(T): 67 | word = idx_to_word[captions[i, t]] 68 | if word != '<NULL>': 69 | words.append(word) 70 | if word == '<END>': 71 | break 72 | decoded.append(' '.join(words)) 73 | if singleton: 74 | decoded = decoded[0] 75 | return decoded 76 | 77 | 78 | def sample_coco_minibatch(data, batch_size=100, split='train'): 79 | split_size = data['%s_captions' % split].shape[0] 80 | mask = np.random.choice(split_size, batch_size) 81 | captions = data['%s_captions' % split][mask] 82 | image_idxs = data['%s_image_idxs' % split][mask] 83 | image_features = data['%s_features' % split][image_idxs] 84 | urls = data['%s_urls' % split][image_idxs] 85 | return captions, image_features, urls 86 |
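# Illustrative usage sketch (not part of the original file); assumes the
# vocab JSON provides an 'idx_to_word' mapping, as in the CS231n data:
#   data = load_coco_data(pca_features=True)
#   captions, features, urls = sample_coco_minibatch(data, batch_size=2)
#   print(decode_captions(captions, data['idx_to_word']))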
-------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/datasets/get_coco_captioning.sh: -------------------------------------------------------------------------------- 1 | wget "http://cs231n.stanford.edu/coco_captioning.zip" 2 | unzip coco_captioning.zip 3 | rm coco_captioning.zip 4 | -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/datasets/get_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ./get_coco_captioning.sh 3 | ./get_squeezenet_tf.sh 4 | ./get_imagenet_val.sh 5 | 6 | -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/datasets/get_imagenet_val.sh: -------------------------------------------------------------------------------- 1 | wget http://cs231n.stanford.edu/imagenet_val_25.npz 2 | -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/datasets/get_squeezenet_tf.sh: -------------------------------------------------------------------------------- 1 | wget "http://cs231n.stanford.edu/squeezenet_tf.zip" 2 | unzip squeezenet_tf.zip 3 | rm squeezenet_tf.zip 4 | -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/gradient_check.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from builtins import range 3 | from past.builtins import xrange 4 | 5 | import numpy as np 6 | from random import randrange 7 | 8 | def eval_numerical_gradient(f, x, verbose=True, h=0.00001): 9 | """ 10 | a naive implementation of numerical gradient of f at x 11 | - f should be a function that takes a single argument 12 | - x is the point (numpy array) to evaluate the gradient at 13 | """ 14 | 15 | fx = f(x) # evaluate function value at original point 16 | grad = np.zeros_like(x) 17 | # iterate over all indexes in x 18 | it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite']) 19 | while not it.finished: 20 | 21 | # evaluate function at x+h 22 | ix = it.multi_index 23 | oldval = x[ix] 24 | x[ix] = oldval + h # increment by h 25 | fxph = f(x) # evaluate f(x + h) 26 | x[ix] = oldval - h 27 | fxmh = f(x) # evaluate f(x - h) 28 | x[ix] = oldval # restore 29 | 30 | # compute the partial derivative with centered formula 31 | grad[ix] = (fxph - fxmh) / (2 * h) # the slope 32 | if verbose: 33 | print(ix, grad[ix]) 34 | it.iternext() # step to next dimension 35 | 36 | return grad 37 | 38 | 39 | def eval_numerical_gradient_array(f, x, df, h=1e-5): 40 | """ 41 | Evaluate a numeric gradient for a function that accepts a numpy 42 | array and returns a numpy array. 43 | """ 44 | grad = np.zeros_like(x) 45 | it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite']) 46 | while not it.finished: 47 | ix = it.multi_index 48 | 49 | oldval = x[ix] 50 | x[ix] = oldval + h 51 | pos = f(x).copy() 52 | x[ix] = oldval - h 53 | neg = f(x).copy() 54 | x[ix] = oldval 55 | 56 | grad[ix] = np.sum((pos - neg) * df) / (2 * h) 57 | it.iternext() 58 | return grad 59 | 60 | 61 | def eval_numerical_gradient_blobs(f, inputs, output, h=1e-5): 62 | """ 63 | Compute numeric gradients for a function that operates on input 64 | and output blobs. 65 | 66 | We assume that f accepts several input blobs as arguments, followed by a 67 | blob where outputs will be written. For example, f might be called like: 68 | 69 | f(x, w, out) 70 | 71 | where x and w are input Blobs, and the result of f will be written to out. 72 | 73 | Inputs: 74 | - f: function 75 | - inputs: tuple of input blobs 76 | - output: output blob 77 | - h: step size 78 | """ 79 | numeric_diffs = [] 80 | for input_blob in inputs: 81 | diff = np.zeros_like(input_blob.diffs) 82 | it = np.nditer(input_blob.vals, flags=['multi_index'], 83 | op_flags=['readwrite']) 84 | while not it.finished: 85 | idx = it.multi_index 86 | orig = input_blob.vals[idx] 87 | 88 | input_blob.vals[idx] = orig + h 89 | f(*(inputs + (output,))) 90 | pos = np.copy(output.vals) 91 | input_blob.vals[idx] = orig - h 92 | f(*(inputs + (output,))) 93 | neg = np.copy(output.vals) 94 | input_blob.vals[idx] = orig 95 | 96 | diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h) 97 | 98 | it.iternext() 99 | numeric_diffs.append(diff) 100 | return numeric_diffs 101 | 102 | 103 | def eval_numerical_gradient_net(net, inputs, output, h=1e-5): 104 | return eval_numerical_gradient_blobs(lambda *args: net.forward(), 105 | inputs, output, h=h) 106 | 107 |
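# Illustrative check (not part of the original file): for f(w) = sum(w * w)
# the numerical gradient should be close to the analytic gradient 2 * w.
#   w = np.random.randn(3, 4)
#   grad = eval_numerical_gradient(lambda v: np.sum(v * v), w, verbose=False)
#   assert np.allclose(grad, 2 * w, atol=1e-4)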
112 | """
113 | 
114 | for i in range(num_checks):
115 | ix = tuple([randrange(m) for m in x.shape])
116 | 
117 | oldval = x[ix]
118 | x[ix] = oldval + h # increment by h
119 | fxph = f(x) # evaluate f(x + h)
120 | x[ix] = oldval - h # decrement by h
121 | fxmh = f(x) # evaluate f(x - h)
122 | x[ix] = oldval # reset
123 | 
124 | grad_numerical = (fxph - fxmh) / (2 * h)
125 | grad_analytic = analytic_grad[ix]
126 | rel_error = (abs(grad_numerical - grad_analytic) /
127 | (abs(grad_numerical) + abs(grad_analytic)))
128 | print('numerical: %f analytic: %f, relative error: %e'
129 | %(grad_numerical, grad_analytic, rel_error))
130 | 
--------------------------------------------------------------------------------
/Students/10-adversarial-attacks/tensorflow/utils/im2col.py:
--------------------------------------------------------------------------------
1 | from builtins import range
2 | import numpy as np
3 | 
4 | 
5 | def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
6 | # First figure out what the size of the output should be
7 | N, C, H, W = x_shape
8 | assert (H + 2 * padding - field_height) % stride == 0
9 | assert (W + 2 * padding - field_width) % stride == 0
10 | out_height = (H + 2 * padding - field_height) // stride + 1
11 | out_width = (W + 2 * padding - field_width) // stride + 1
12 | 
13 | i0 = np.repeat(np.arange(field_height), field_width)
14 | i0 = np.tile(i0, C)
15 | i1 = stride * np.repeat(np.arange(out_height), out_width)
16 | j0 = np.tile(np.arange(field_width), field_height * C)
17 | j1 = stride * np.tile(np.arange(out_width), out_height)
18 | i = i0.reshape(-1, 1) + i1.reshape(1, -1)
19 | j = j0.reshape(-1, 1) + j1.reshape(1, -1)
20 | 
21 | k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
22 | 
23 | return (k, i, j)
24 | 
25 | 
26 | def im2col_indices(x, field_height, field_width, padding=1, stride=1):
27 | """ An implementation of im2col based on some fancy indexing """
28 | # Zero-pad the input
29 | p = padding
30 | x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
31 | 
32 | k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,
33 | stride)
34 | 
35 | cols = x_padded[:, k, i, j]
36 | C = x.shape[1]
37 | cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
38 | return cols
39 | 
40 | 
41 | def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
42 | stride=1):
43 | """ An implementation of col2im based on fancy indexing and np.add.at """
44 | N, C, H, W = x_shape
45 | H_padded, W_padded = H + 2 * padding, W + 2 * padding
46 | x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
47 | k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,
48 | stride)
49 | cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
50 | cols_reshaped = cols_reshaped.transpose(2, 0, 1)
51 | np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
52 | if padding == 0:
53 | return x_padded
54 | return x_padded[:, :, padding:-padding, padding:-padding]
55 | 
56 | 
57 | 
--------------------------------------------------------------------------------
/Students/10-adversarial-attacks/tensorflow/utils/im2col_cython.pyx:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | cimport numpy as np
3 | cimport cython
4 | 
5 | # DTYPE = np.float64
6 | # ctypedef np.float64_t DTYPE_t
7 | 
8 | ctypedef fused DTYPE_t:
9 | np.float32_t
10 | np.float64_t
11 | 
12 | def 
im2col_cython(np.ndarray[DTYPE_t, ndim=4] x, int field_height, 13 | int field_width, int padding, int stride): 14 | cdef int N = x.shape[0] 15 | cdef int C = x.shape[1] 16 | cdef int H = x.shape[2] 17 | cdef int W = x.shape[3] 18 | 19 | cdef int HH = (H + 2 * padding - field_height) / stride + 1 20 | cdef int WW = (W + 2 * padding - field_width) / stride + 1 21 | 22 | cdef int p = padding 23 | cdef np.ndarray[DTYPE_t, ndim=4] x_padded = np.pad(x, 24 | ((0, 0), (0, 0), (p, p), (p, p)), mode='constant') 25 | 26 | cdef np.ndarray[DTYPE_t, ndim=2] cols = np.zeros( 27 | (C * field_height * field_width, N * HH * WW), 28 | dtype=x.dtype) 29 | 30 | # Moving the inner loop to a C function with no bounds checking works, but does 31 | # not seem to help performance in any measurable way. 32 | 33 | im2col_cython_inner(cols, x_padded, N, C, H, W, HH, WW, 34 | field_height, field_width, padding, stride) 35 | return cols 36 | 37 | 38 | @cython.boundscheck(False) 39 | cdef int im2col_cython_inner(np.ndarray[DTYPE_t, ndim=2] cols, 40 | np.ndarray[DTYPE_t, ndim=4] x_padded, 41 | int N, int C, int H, int W, int HH, int WW, 42 | int field_height, int field_width, int padding, int stride) except? -1: 43 | cdef int c, ii, jj, row, yy, xx, i, col 44 | 45 | for c in range(C): 46 | for yy in range(HH): 47 | for xx in range(WW): 48 | for ii in range(field_height): 49 | for jj in range(field_width): 50 | row = c * field_width * field_height + ii * field_height + jj 51 | for i in range(N): 52 | col = yy * WW * N + xx * N + i 53 | cols[row, col] = x_padded[i, c, stride * yy + ii, stride * xx + jj] 54 | 55 | 56 | 57 | def col2im_cython(np.ndarray[DTYPE_t, ndim=2] cols, int N, int C, int H, int W, 58 | int field_height, int field_width, int padding, int stride): 59 | cdef np.ndarray x = np.empty((N, C, H, W), dtype=cols.dtype) 60 | cdef int HH = (H + 2 * padding - field_height) / stride + 1 61 | cdef int WW = (W + 2 * padding - field_width) / stride + 1 62 | cdef np.ndarray[DTYPE_t, ndim=4] x_padded = np.zeros((N, C, H + 2 * padding, W + 2 * padding), 63 | dtype=cols.dtype) 64 | 65 | # Moving the inner loop to a C-function with no bounds checking improves 66 | # performance quite a bit for col2im. 67 | col2im_cython_inner(cols, x_padded, N, C, H, W, HH, WW, 68 | field_height, field_width, padding, stride) 69 | if padding > 0: 70 | return x_padded[:, :, padding:-padding, padding:-padding] 71 | return x_padded 72 | 73 | 74 | @cython.boundscheck(False) 75 | cdef int col2im_cython_inner(np.ndarray[DTYPE_t, ndim=2] cols, 76 | np.ndarray[DTYPE_t, ndim=4] x_padded, 77 | int N, int C, int H, int W, int HH, int WW, 78 | int field_height, int field_width, int padding, int stride) except? 
-1: 79 | cdef int c, ii, jj, row, yy, xx, i, col 80 | 81 | for c in range(C): 82 | for ii in range(field_height): 83 | for jj in range(field_width): 84 | row = c * field_width * field_height + ii * field_height + jj 85 | for yy in range(HH): 86 | for xx in range(WW): 87 | for i in range(N): 88 | col = yy * WW * N + xx * N + i 89 | x_padded[i, c, stride * yy + ii, stride * xx + jj] += cols[row, col] 90 | 91 | 92 | @cython.boundscheck(False) 93 | @cython.wraparound(False) 94 | cdef col2im_6d_cython_inner(np.ndarray[DTYPE_t, ndim=6] cols, 95 | np.ndarray[DTYPE_t, ndim=4] x_padded, 96 | int N, int C, int H, int W, int HH, int WW, 97 | int out_h, int out_w, int pad, int stride): 98 | 99 | cdef int c, hh, ww, n, h, w 100 | for n in range(N): 101 | for c in range(C): 102 | for hh in range(HH): 103 | for ww in range(WW): 104 | for h in range(out_h): 105 | for w in range(out_w): 106 | x_padded[n, c, stride * h + hh, stride * w + ww] += cols[c, hh, ww, n, h, w] 107 | 108 | 109 | def col2im_6d_cython(np.ndarray[DTYPE_t, ndim=6] cols, int N, int C, int H, int W, 110 | int HH, int WW, int pad, int stride): 111 | cdef np.ndarray x = np.empty((N, C, H, W), dtype=cols.dtype) 112 | cdef int out_h = (H + 2 * pad - HH) / stride + 1 113 | cdef int out_w = (W + 2 * pad - WW) / stride + 1 114 | cdef np.ndarray[DTYPE_t, ndim=4] x_padded = np.zeros((N, C, H + 2 * pad, W + 2 * pad), 115 | dtype=cols.dtype) 116 | 117 | col2im_6d_cython_inner(cols, x_padded, N, C, H, W, HH, WW, out_h, out_w, pad, stride) 118 | 119 | if pad > 0: 120 | return x_padded[:, :, pad:-pad, pad:-pad] 121 | return x_padded 122 | -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/image_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from future import standard_library 3 | standard_library.install_aliases() 4 | from builtins import range 5 | import urllib.request, urllib.error, urllib.parse, os, tempfile 6 | 7 | import numpy as np 8 | from scipy.misc import imread, imresize 9 | 10 | """ 11 | Utility functions used for viewing and processing images. 12 | """ 13 | 14 | def blur_image(X): 15 | """ 16 | A very gentle image blurring operation, to be used as a regularizer for 17 | image generation. 18 | 19 | Inputs: 20 | - X: Image data of shape (N, 3, H, W) 21 | 22 | Returns: 23 | - X_blur: Blurred version of X, of shape (N, 3, H, W) 24 | """ 25 | from cs231n.fast_layers import conv_forward_fast 26 | w_blur = np.zeros((3, 3, 3, 3)) 27 | b_blur = np.zeros(3) 28 | blur_param = {'stride': 1, 'pad': 1} 29 | for i in range(3): 30 | w_blur[i, i] = np.asarray([[1, 2, 1], [2, 188, 2], [1, 2, 1]], 31 | dtype=np.float32) 32 | w_blur /= 200.0 33 | return conv_forward_fast(X, w_blur, b_blur, blur_param)[0] 34 | 35 | 36 | SQUEEZENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32) 37 | SQUEEZENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32) 38 | 39 | def preprocess_image(img): 40 | """Preprocess an image for squeezenet. 41 | 42 | Subtracts the pixel mean and divides by the standard deviation. 
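
Example (an illustrative round trip with deprocess_image, defined below):

    img = np.random.randint(0, 256, (64, 64, 3)).astype(np.uint8)
    x = preprocess_image(img)    # float32, roughly zero-centered
    img2 = deprocess_image(x)    # back to uint8 in [0, 255]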
43 | """
44 | return (img.astype(np.float32)/255.0 - SQUEEZENET_MEAN) / SQUEEZENET_STD
45 | 
46 | 
47 | def deprocess_image(img, rescale=False):
48 | """Undo preprocessing on an image and convert back to uint8."""
49 | img = (img * SQUEEZENET_STD + SQUEEZENET_MEAN)
50 | if rescale:
51 | vmin, vmax = img.min(), img.max()
52 | img = (img - vmin) / (vmax - vmin)
53 | return np.clip(255 * img, 0.0, 255.0).astype(np.uint8)
54 | 
55 | 
56 | def image_from_url(url):
57 | """
58 | Read an image from a URL. Returns a numpy array with the pixel data.
59 | We write the image to a temporary file then read it back. Kinda gross.
60 | """
61 | try:
62 | f = urllib.request.urlopen(url)
63 | _, fname = tempfile.mkstemp()
64 | with open(fname, 'wb') as ff:
65 | ff.write(f.read())
66 | img = imread(fname)
67 | os.remove(fname)
68 | return img
69 | except urllib.error.URLError as e:
70 | print('URL Error: ', e.reason, url)
71 | except urllib.error.HTTPError as e:
72 | print('HTTP Error: ', e.code, url)
73 | 
74 | 
75 | def load_image(filename, size=None):
76 | """Load and resize an image from disk.
77 | 
78 | Inputs:
79 | - filename: path to file
80 | - size: size of shortest dimension after rescaling
81 | """
82 | img = imread(filename)
83 | if size is not None:
84 | orig_shape = np.array(img.shape[:2])
85 | min_idx = np.argmin(orig_shape)
86 | scale_factor = float(size) / orig_shape[min_idx]
87 | new_shape = (orig_shape * scale_factor).astype(int)
88 | img = imresize(img, scale_factor)
89 | return img
90 | 
--------------------------------------------------------------------------------
/Students/10-adversarial-attacks/tensorflow/utils/layer_utils.py:
--------------------------------------------------------------------------------
1 | from cs231n.layers import *
2 | from cs231n.fast_layers import *
3 | 
4 | 
5 | def affine_relu_forward(x, w, b):
6 | """
7 | Convenience layer that performs an affine transform followed by a ReLU
8 | 
9 | Inputs:
10 | - x: Input to the affine layer
11 | - w, b: Weights for the affine layer
12 | 
13 | Returns a tuple of:
14 | - out: Output from the ReLU
15 | - cache: Object to give to the backward pass
16 | """
17 | a, fc_cache = affine_forward(x, w, b)
18 | out, relu_cache = relu_forward(a)
19 | cache = (fc_cache, relu_cache)
20 | return out, cache
21 | 
22 | 
23 | def affine_relu_backward(dout, cache):
24 | """
25 | Backward pass for the affine-relu convenience layer
26 | """
27 | fc_cache, relu_cache = cache
28 | da = relu_backward(dout, relu_cache)
29 | dx, dw, db = affine_backward(da, fc_cache)
30 | return dx, dw, db
31 | 
32 | 
33 | def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param):
34 | """
35 | Convenience layer that performs an affine transform, batch normalization,
36 | and ReLU.
37 | 
38 | Inputs:
39 | - x: Array of shape (N, D1); input to the affine layer
40 | - w, b: Arrays of shape (D1, D2) and (D2,) giving the weight and bias for
41 | the affine transform.
42 | - gamma, beta: Arrays of shape (D2,) and (D2,) giving scale and shift
43 | parameters for batch normalization.
44 | - bn_param: Dictionary of parameters for batch normalization.
45 | 
46 | Returns:
47 | - out: Output from ReLU, of shape (N, D2)
48 | - cache: Object to give to the backward pass.
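
Example (illustrative shapes only; batchnorm_forward is assumed to follow
the cs231n convention of a bn_param dict carrying a 'mode' key):

    x = np.random.randn(16, 100)                   # (N, D1)
    w, b = np.random.randn(100, 50), np.zeros(50)  # (D1, D2), (D2,)
    gamma, beta = np.ones(50), np.zeros(50)
    out, cache = affine_bn_relu_forward(x, w, b, gamma, beta,
                                        {'mode': 'train'})
    dx, dw, db, dgamma, dbeta = affine_bn_relu_backward(np.ones_like(out), cache)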
49 | """ 50 | a, fc_cache = affine_forward(x, w, b) 51 | a_bn, bn_cache = batchnorm_forward(a, gamma, beta, bn_param) 52 | out, relu_cache = relu_forward(a_bn) 53 | cache = (fc_cache, bn_cache, relu_cache) 54 | return out, cache 55 | 56 | 57 | def affine_bn_relu_backward(dout, cache): 58 | """ 59 | Backward pass for the affine-batchnorm-relu convenience layer. 60 | """ 61 | fc_cache, bn_cache, relu_cache = cache 62 | da_bn = relu_backward(dout, relu_cache) 63 | da, dgamma, dbeta = batchnorm_backward(da_bn, bn_cache) 64 | dx, dw, db = affine_backward(da, fc_cache) 65 | return dx, dw, db, dgamma, dbeta 66 | 67 | 68 | def conv_relu_forward(x, w, b, conv_param): 69 | """ 70 | A convenience layer that performs a convolution followed by a ReLU. 71 | 72 | Inputs: 73 | - x: Input to the convolutional layer 74 | - w, b, conv_param: Weights and parameters for the convolutional layer 75 | 76 | Returns a tuple of: 77 | - out: Output from the ReLU 78 | - cache: Object to give to the backward pass 79 | """ 80 | a, conv_cache = conv_forward_fast(x, w, b, conv_param) 81 | out, relu_cache = relu_forward(a) 82 | cache = (conv_cache, relu_cache) 83 | return out, cache 84 | 85 | 86 | def conv_relu_backward(dout, cache): 87 | """ 88 | Backward pass for the conv-relu convenience layer. 89 | """ 90 | conv_cache, relu_cache = cache 91 | da = relu_backward(dout, relu_cache) 92 | dx, dw, db = conv_backward_fast(da, conv_cache) 93 | return dx, dw, db 94 | 95 | 96 | def conv_bn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): 97 | a, conv_cache = conv_forward_fast(x, w, b, conv_param) 98 | an, bn_cache = spatial_batchnorm_forward(a, gamma, beta, bn_param) 99 | out, relu_cache = relu_forward(an) 100 | cache = (conv_cache, bn_cache, relu_cache) 101 | return out, cache 102 | 103 | 104 | def conv_bn_relu_backward(dout, cache): 105 | conv_cache, bn_cache, relu_cache = cache 106 | dan = relu_backward(dout, relu_cache) 107 | da, dgamma, dbeta = spatial_batchnorm_backward(dan, bn_cache) 108 | dx, dw, db = conv_backward_fast(da, conv_cache) 109 | return dx, dw, db, dgamma, dbeta 110 | 111 | 112 | def conv_relu_pool_forward(x, w, b, conv_param, pool_param): 113 | """ 114 | Convenience layer that performs a convolution, a ReLU, and a pool. 115 | 116 | Inputs: 117 | - x: Input to the convolutional layer 118 | - w, b, conv_param: Weights and parameters for the convolutional layer 119 | - pool_param: Parameters for the pooling layer 120 | 121 | Returns a tuple of: 122 | - out: Output from the pooling layer 123 | - cache: Object to give to the backward pass 124 | """ 125 | a, conv_cache = conv_forward_fast(x, w, b, conv_param) 126 | s, relu_cache = relu_forward(a) 127 | out, pool_cache = max_pool_forward_fast(s, pool_param) 128 | cache = (conv_cache, relu_cache, pool_cache) 129 | return out, cache 130 | 131 | 132 | def conv_relu_pool_backward(dout, cache): 133 | """ 134 | Backward pass for the conv-relu-pool convenience layer 135 | """ 136 | conv_cache, relu_cache, pool_cache = cache 137 | ds = max_pool_backward_fast(dout, pool_cache) 138 | da = relu_backward(ds, relu_cache) 139 | dx, dw, db = conv_backward_fast(da, conv_cache) 140 | return dx, dw, db 141 | -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/optim.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | """ 4 | This file implements various first-order update rules that are commonly used for 5 | training neural networks. 
Each update rule accepts current weights and the 6 | gradient of the loss with respect to those weights and produces the next set of 7 | weights. Each update rule has the same interface: 8 | 9 | def update(w, dw, config=None): 10 | 11 | Inputs: 12 | - w: A numpy array giving the current weights. 13 | - dw: A numpy array of the same shape as w giving the gradient of the 14 | loss with respect to w. 15 | - config: A dictionary containing hyperparameter values such as learning rate, 16 | momentum, etc. If the update rule requires caching values over many 17 | iterations, then config will also hold these cached values. 18 | 19 | Returns: 20 | - next_w: The next point after the update. 21 | - config: The config dictionary to be passed to the next iteration of the 22 | update rule. 23 | 24 | NOTE: For most update rules, the default learning rate will probably not perform 25 | well; however the default values of the other hyperparameters should work well 26 | for a variety of different problems. 27 | 28 | For efficiency, update rules may perform in-place updates, mutating w and 29 | setting next_w equal to w. 30 | """ 31 | 32 | 33 | def sgd(w, dw, config=None): 34 | """ 35 | Performs vanilla stochastic gradient descent. 36 | 37 | config format: 38 | - learning_rate: Scalar learning rate. 39 | """ 40 | if config is None: config = {} 41 | config.setdefault('learning_rate', 1e-2) 42 | 43 | w -= config['learning_rate'] * dw 44 | return w, config 45 | 46 | 47 | def adam(x, dx, config=None): 48 | """ 49 | Uses the Adam update rule, which incorporates moving averages of both the 50 | gradient and its square and a bias correction term. 51 | 52 | config format: 53 | - learning_rate: Scalar learning rate. 54 | - beta1: Decay rate for moving average of first moment of gradient. 55 | - beta2: Decay rate for moving average of second moment of gradient. 56 | - epsilon: Small scalar used for smoothing to avoid dividing by zero. 57 | - m: Moving average of gradient. 58 | - v: Moving average of squared gradient. 59 | - t: Iteration number. 
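
Example (illustrative; the returned config carries m, v, and t between calls):

    w, config = np.random.randn(10), None
    for _ in range(100):
        dw = 2 * w                       # gradient of np.sum(w ** 2)
        w, config = adam(w, dw, config)  # w decays toward zero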
60 | """ 61 | if config is None: config = {} 62 | config.setdefault('learning_rate', 1e-3) 63 | config.setdefault('beta1', 0.9) 64 | config.setdefault('beta2', 0.999) 65 | config.setdefault('epsilon', 1e-8) 66 | config.setdefault('m', np.zeros_like(x)) 67 | config.setdefault('v', np.zeros_like(x)) 68 | config.setdefault('t', 0) 69 | 70 | next_x = None 71 | beta1, beta2, eps = config['beta1'], config['beta2'], config['epsilon'] 72 | t, m, v = config['t'], config['m'], config['v'] 73 | m = beta1 * m + (1 - beta1) * dx 74 | v = beta2 * v + (1 - beta2) * (dx * dx) 75 | t += 1 76 | alpha = config['learning_rate'] * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t) 77 | x -= alpha * (m / (np.sqrt(v) + eps)) 78 | config['t'] = t 79 | config['m'] = m 80 | config['v'] = v 81 | next_x = x 82 | 83 | return next_x, config 84 | -------------------------------------------------------------------------------- /Students/10-adversarial-attacks/tensorflow/utils/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Build import cythonize 4 | import numpy 5 | 6 | extensions = [ 7 | Extension('im2col_cython', ['im2col_cython.pyx'], 8 | include_dirs = [numpy.get_include()] 9 | ), 10 | ] 11 | 12 | setup( 13 | ext_modules = cythonize(extensions), 14 | ) 15 | -------------------------------------------------------------------------------- /Students/12-biased-embeddings/racist-ai.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/12-biased-embeddings/racist-ai.zip -------------------------------------------------------------------------------- /Students/1b-computational-graphs/comp-graphs.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/1b-computational-graphs/comp-graphs.pptx -------------------------------------------------------------------------------- /Students/2-transfer-learning/CatsvsDogs/utils/download_model.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft. All rights reserved. 2 | 3 | # Licensed under the MIT license. See LICENSE.md file in the project root 4 | # for full license information. 
5 | # ============================================================================== 6 | 7 | from __future__ import print_function 8 | import os 9 | import sys 10 | try: 11 | from urllib.request import urlretrieve 12 | except ImportError: 13 | from urllib import urlretrieve 14 | 15 | # Add models here like this: (category, model_name, model_url) 16 | models = (('Image Classification', 'AlexNet_ImageNet_CNTK', 'https://www.cntk.ai/Models/CNTK_Pretrained/AlexNet_ImageNet_CNTK.model'), 17 | ('Image Classification', 'AlexNet_ImageNet_Caffe', 'https://www.cntk.ai/Models/Caffe_Converted/AlexNet_ImageNet_Caffe.model'), 18 | ('Image Classification', 'InceptionV3_ImageNet_CNTK', 'https://www.cntk.ai/Models/CNTK_Pretrained/InceptionV3_ImageNet_CNTK.model'), 19 | ('Image Classification', 'BNInception_ImageNet_Caffe', 'https://www.cntk.ai/Models/Caffe_Converted/BNInception_ImageNet_Caffe.model'), 20 | ('Image Classification', 'ResNet18_ImageNet_CNTK', 'https://www.cntk.ai/Models/CNTK_Pretrained/ResNet18_ImageNet_CNTK.model'), 21 | ('Image Classification', 'ResNet34_ImageNet_CNTK', 'https://www.cntk.ai/Models/CNTK_Pretrained/ResNet34_ImageNet_CNTK.model'), 22 | ('Image Classification', 'ResNet50_ImageNet_CNTK', 'https://www.cntk.ai/Models/CNTK_Pretrained/ResNet50_ImageNet_CNTK.model'), 23 | ('Image Classification', 'ResNet20_CIFAR10_CNTK', 'https://www.cntk.ai/Models/CNTK_Pretrained/ResNet20_CIFAR10_CNTK.model'), 24 | ('Image Classification', 'ResNet110_CIFAR10_CNTK', 'https://www.cntk.ai/Models/CNTK_Pretrained/ResNet110_CIFAR10_CNTK.model'), 25 | ('Image Classification', 'ResNet50_ImageNet_Caffe', 'https://www.cntk.ai/Models/Caffe_Converted/ResNet50_ImageNet_Caffe.model'), 26 | ('Image Classification', 'ResNet101_ImageNet_Caffe', 'https://www.cntk.ai/Models/Caffe_Converted/ResNet101_ImageNet_Caffe.model'), 27 | ('Image Classification', 'ResNet152_ImageNet_Caffe', 'https://www.cntk.ai/Models/Caffe_Converted/ResNet152_ImageNet_Caffe.model'), 28 | ('Image Classification', 'VGG16_ImageNet_Caffe', 'https://www.cntk.ai/Models/Caffe_Converted/VGG16_ImageNet_Caffe.model'), 29 | ('Image Classification', 'VGG19_ImageNet_Caffe', 'https://www.cntk.ai/Models/Caffe_Converted/VGG19_ImageNet_Caffe.model'), 30 | ('Image Object Detection', 'Fast-RCNN_grocery100', 'https://www.cntk.ai/Models/FRCN_Grocery/Fast-RCNN_grocery100.model'), 31 | ('Image Object Detection', 'Fast-RCNN_Pascal', 'https://www.cntk.ai/Models/FRCN_Pascal/Fast-RCNN.model')) 32 | 33 | def download_model(model_file_name, model_url): 34 | model_dir = os.path.dirname(os.path.abspath(__file__)) 35 | filename = os.path.join(model_dir, model_file_name) 36 | if not os.path.exists(filename): 37 | print('Downloading model from ' + model_url + ', may take a while...') 38 | urlretrieve(model_url, filename) 39 | print('Saved model as ' + filename) 40 | else: 41 | print('CNTK model already available at ' + filename) 42 | return filename 43 | 44 | def download_model_by_name(model_name): 45 | if model_name.endswith('.model'): 46 | model_name = model_name[:-6] 47 | 48 | model = next((x for x in models if x[1]==model_name), None) 49 | if model is None: 50 | print("ERROR: Unknown model name '%s'." 
% model_name)
51 | file = list_available_models()
52 | else:
53 | file = download_model(model_name + '.model', model[2])
54 | return file
55 | 
56 | def list_available_models():
57 | print("\nAvailable models (for more information see Readme.md):")
58 | max_cat = max(len(x[0]) for x in models)
59 | max_name = max(len(x[1]) for x in models)
60 | print("{:<{width}} {}".format('Model name', 'Category', width=max_name))
61 | print("{:-<{width}} {:-<{width_cat}}".format('', '', width=max_name, width_cat=max_cat))
62 | for model in sorted(models):
63 | print("{:<{width}} {}".format(model[1], model[0], width=max_name))
64 | 
65 | if __name__ == "__main__":
66 | args = sys.argv
67 | if len(args) != 2:
68 | print("Please provide a model name as the single argument. Usage:")
69 | print(" python download_model.py <model_name>")
70 | list_available_models()
71 | else:
72 | model_name = args[1]
73 | if model_name == 'list':
74 | list_available_models()
75 | else:
76 | download_model_by_name(model_name)
77 | 
--------------------------------------------------------------------------------
/Students/2-transfer-learning/README.md:
--------------------------------------------------------------------------------
1 | # CNTK Examples: Image/TransferLearning
2 | 
3 | ## Overview
4 | 
5 | |Data: |A data set containing images of 102 different types of flowers ([website](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html)).
6 | |:---------|:---
7 | |Purpose |Demonstrate how to perform transfer learning in CNTK.
8 | |Network |Pre-trained ResNet_18 model, which is modified to fit the flowers data set.
9 | |Training |In this example all layers (old and new) are trained with the same learning rate.
10 | 
11 | ## Running the example
12 | 
13 | ### Getting the data
14 | 
15 | We use the `Flowers` data set ([Examples/Image/DataSets/Flowers](../DataSets/Flowers)) and a pre-trained ResNet_18 model [PretrainedModels/ResNet18_ImageNet_CNTK.model](../../../PretrainedModels). To download both, run
16 | 
17 | `python install_data_and_model.py`
18 | 
19 | ### Details
20 | 
21 | Run `python TransferLearning.py` to train and evaluate the transfer learning model. The model achieves 93% accuracy on the Flowers data set after training for 20 epochs. A detailed walk-through is provided in the ['Build your own image classifier using Transfer Learning'](https://docs.microsoft.com/en-us/cognitive-toolkit/Build-your-own-image-classifier-using-Transfer-Learning) tutorial on the CNTK GitHub wiki.
22 | 
--------------------------------------------------------------------------------
/Students/2-transfer-learning/TransferLearning_Extended.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | 
3 | # Licensed under the MIT license. See LICENSE.md file in the project root
4 | # for full license information.
5 | # ============================================================================== 6 | 7 | from __future__ import print_function 8 | import numpy as np 9 | import os 10 | from cntk import load_model 11 | from TransferLearning import * 12 | 13 | 14 | # define base model location and characteristics 15 | base_folder = os.path.dirname(os.path.abspath(__file__)) 16 | base_model_file = os.path.join(base_folder, "..", "..", "..", "PretrainedModels", "ResNet18_ImageNet_CNTK.model") 17 | feature_node_name = "features" 18 | last_hidden_node_name = "z.x" 19 | image_height = 224 20 | image_width = 224 21 | num_channels = 3 22 | 23 | # define data location and characteristics 24 | train_image_folder = os.path.join(base_folder, "..", "DataSets", "Animals", "Train") 25 | test_image_folder = os.path.join(base_folder, "..", "DataSets", "Animals", "Test") 26 | file_endings = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG'] 27 | 28 | def create_map_file_from_folder(root_folder, class_mapping, include_unknown=False): 29 | map_file_name = os.path.join(root_folder, "map.txt") 30 | lines = [] 31 | for class_id in range(0, len(class_mapping)): 32 | folder = os.path.join(root_folder, class_mapping[class_id]) 33 | if os.path.exists(folder): 34 | for entry in os.listdir(folder): 35 | filename = os.path.join(folder, entry) 36 | if os.path.isfile(filename) and os.path.splitext(filename)[1] in file_endings: 37 | lines.append("{0}\t{1}\n".format(filename, class_id)) 38 | 39 | if include_unknown: 40 | for entry in os.listdir(root_folder): 41 | filename = os.path.join(root_folder, entry) 42 | if os.path.isfile(filename) and os.path.splitext(filename)[1] in file_endings: 43 | lines.append("{0}\t-1\n".format(filename)) 44 | 45 | lines.sort() 46 | with open(map_file_name , 'w') as map_file: 47 | for line in lines: 48 | map_file.write(line) 49 | 50 | return map_file_name 51 | 52 | def create_class_mapping_from_folder(root_folder): 53 | classes = [] 54 | for _, directories, _ in os.walk(root_folder): 55 | for directory in directories: 56 | classes.append(directory) 57 | classes.sort() 58 | return np.asarray(classes) 59 | 60 | def format_output_line(img_name, true_class, probs, class_mapping, top_n=3): 61 | class_probs = np.column_stack((probs, class_mapping)).tolist() 62 | class_probs.sort(key=lambda x: float(x[0]), reverse=True) 63 | top_n = min(top_n, len(class_mapping)) if top_n > 0 else len(class_mapping) 64 | true_class_name = class_mapping[true_class] if true_class >= 0 else 'unknown' 65 | line = '[{"class": "%s", "predictions": {' % true_class_name 66 | for i in range(0, top_n): 67 | line = '%s"%s":%.3f, ' % (line, class_probs[i][1], float(class_probs[i][0])) 68 | line = '%s}, "image": "%s"}]\n' % (line[:-2], img_name.replace('\\', '/').rsplit('/', 1)[1]) 69 | return line 70 | 71 | def train_and_eval(_base_model_file, _train_image_folder, _test_image_folder, _results_file, _new_model_file, testing = False): 72 | # check for model and data existence 73 | if not (os.path.exists(_base_model_file) and os.path.exists(_train_image_folder) and os.path.exists(_test_image_folder)): 74 | print("Please run 'python install_data_and_model.py' first to get the required data and model.") 75 | exit(0) 76 | 77 | # get class mapping and map files from train and test image folder 78 | class_mapping = create_class_mapping_from_folder(_train_image_folder) 79 | train_map_file = create_map_file_from_folder(_train_image_folder, class_mapping) 80 | test_map_file = create_map_file_from_folder(_test_image_folder, class_mapping, 
include_unknown=True)
81 | 
82 | # train
83 | trained_model = train_model(_base_model_file, feature_node_name, last_hidden_node_name,
84 | image_width, image_height, num_channels,
85 | len(class_mapping), train_map_file, num_epochs=30, freeze=True)
86 | 
87 | if not testing:
88 | trained_model.save(_new_model_file)
89 | print("Stored trained model at %s" % _new_model_file)
90 | 
91 | # evaluate test images
92 | with open(_results_file, 'w') as output_file:
93 | with open(test_map_file, "r") as input_file:
94 | for line in input_file:
95 | tokens = line.rstrip().split('\t')
96 | img_file = tokens[0]
97 | true_label = int(tokens[1])
98 | probs = eval_single_image(trained_model, img_file, image_width, image_height)
99 | 
100 | formatted_line = format_output_line(img_file, true_label, probs, class_mapping)
101 | output_file.write(formatted_line)
102 | 
103 | print("Done. Wrote output to %s" % _results_file)
104 | 
105 | if __name__ == '__main__':
106 | try_set_default_device(gpu(0))
107 | 
108 | results_file = os.path.join(base_folder, "Output", "predictions.txt")
109 | new_model_file = os.path.join(base_folder, "Output", "TransferLearning.model")
110 | train_and_eval(base_model_file, train_image_folder, test_image_folder, results_file, new_model_file)
111 | 
--------------------------------------------------------------------------------
/Students/2-transfer-learning/install_data_and_model.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | 
3 | # Licensed under the MIT license. See LICENSE.md file in the project root
4 | # for full license information.
5 | # ==============================================================================
6 | 
7 | from __future__ import print_function
8 | import os, sys
9 | 
10 | 
11 | base_folder = os.path.dirname(os.path.abspath(__file__))
12 | 
13 | sys.path.append(os.path.join(base_folder, "..", "DataSets", "Flowers"))
14 | from install_flowers import download_flowers_data
15 | download_flowers_data()
16 | 
17 | sys.path.append(os.path.join(base_folder, "..", "DataSets", "Animals"))
18 | from install_animals import download_animals_data
19 | download_animals_data()
20 | 
21 | sys.path.append(os.path.join(base_folder, "..", "DataSets", "Grocery"))
22 | from install_grocery import download_grocery_data
23 | download_grocery_data()
24 | 
25 | sys.path.append(os.path.join(base_folder, "..", "..", "..", "PretrainedModels"))
26 | from download_model import download_model_by_name
27 | download_model_by_name("ResNet18_ImageNet_CNTK")
28 | 
--------------------------------------------------------------------------------
/Students/3-keras/common/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/3-keras/common/__init__.py
--------------------------------------------------------------------------------
/Students/3-keras/common/params.py:
--------------------------------------------------------------------------------
1 | # Hyperparams
2 | EPOCHS = 10
3 | BATCHSIZE = 64
4 | LR = 0.01
5 | MOMENTUM = 0.9
6 | N_CLASSES = 10
7 | GPU = True
--------------------------------------------------------------------------------
/Students/3-keras/common/utils.py:
--------------------------------------------------------------------------------
1 | from sklearn.datasets import fetch_mldata
2 | from sklearn.preprocessing import 
OneHotEncoder 3 | from sklearn.model_selection import train_test_split 4 | 5 | import numpy as np 6 | import os 7 | import tarfile 8 | import pickle 9 | import subprocess 10 | 11 | 12 | import sys 13 | if sys.version_info.major == 2: 14 | # Backward compatibility with python 2. 15 | from six.moves import urllib 16 | urlretrieve = urllib.request.urlretrieve 17 | else: 18 | from urllib.request import urlretrieve 19 | 20 | 21 | def read_batch(src): 22 | '''Unpack the pickle files 23 | ''' 24 | with open(src, 'rb') as f: 25 | if sys.version_info.major == 2: 26 | data = pickle.load(f) 27 | else: 28 | data = pickle.load(f, encoding='latin1') 29 | return data 30 | 31 | def download_cifar(src="http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"): 32 | '''Load the training and testing data 33 | ''' 34 | print ('Downloading ' + src) 35 | fname, h = urlretrieve(src, './delete.me') 36 | print ('Done.') 37 | try: 38 | print ('Extracting files...') 39 | with tarfile.open(fname) as tar: 40 | tar.extractall() 41 | print ('Done.') 42 | print ('Preparing train set...') 43 | train_list = [read_batch('./cifar-10-batches-py/data_batch_{0}'.format(i + 1)) for i in range(5)] 44 | x_train = np.concatenate([t['data'] for t in train_list]) 45 | y_train = np.concatenate([t['labels'] for t in train_list]) 46 | print ('Preparing test set...') 47 | tst = read_batch('./cifar-10-batches-py/test_batch') 48 | x_test = tst['data'] 49 | y_test = np.asarray(tst['labels']) 50 | print ('Done.') 51 | finally: 52 | os.remove(fname) 53 | return x_train, x_test, y_train, y_test 54 | 55 | def cifar_for_library(channel_first=True, one_hot=False): 56 | # Raw data 57 | x_train, x_test, y_train, y_test = download_cifar() 58 | # Scale pixel intensity 59 | x_train = x_train/255.0 60 | x_test = x_test/255.0 61 | # Reshape 62 | x_train = x_train.reshape(-1, 3, 32, 32) 63 | x_test = x_test.reshape(-1, 3, 32, 32) 64 | # Channel last 65 | if not channel_first: 66 | x_train = np.swapaxes(x_train, 1, 3) 67 | x_test = np.swapaxes(x_test, 1, 3) 68 | # One-hot encode y 69 | if one_hot: 70 | y_train = np.expand_dims(y_train, axis=-1) 71 | y_test = np.expand_dims(y_test, axis=-1) 72 | enc = OneHotEncoder(categorical_features='all') 73 | fit = enc.fit(y_train) 74 | y_train = fit.transform(y_train).toarray() 75 | y_test = fit.transform(y_test).toarray() 76 | # dtypes 77 | x_train = x_train.astype(np.float32) 78 | x_test = x_test.astype(np.float32) 79 | y_train = y_train.astype(np.int32) 80 | y_test = y_test.astype(np.int32) 81 | return x_train, x_test, y_train, y_test 82 | 83 | def shuffle_data(X, y): 84 | s = np.arange(len(X)) 85 | np.random.shuffle(s) 86 | X = X[s] 87 | y = y[s] 88 | return X, y 89 | 90 | def yield_mb(X, y, batchsize=64, shuffle=False): 91 | assert len(X) == len(y) 92 | if shuffle: 93 | X, y = shuffle_data(X, y) 94 | # Only complete batches are submitted 95 | for i in range(len(X)//batchsize): 96 | yield X[i*batchsize:(i+1)*batchsize], y[i*batchsize:(i+1)*batchsize] 97 | 98 | def get_gpu_name(): 99 | try: 100 | out_str = subprocess.run(["nvidia-smi", "--query-gpu=gpu_name", "--format=csv"], stdout=subprocess.PIPE).stdout 101 | out_list = out_str.decode("utf-8").split('\n') 102 | out_list = out_list[1:-1] 103 | return out_list 104 | except Exception as e: 105 | print(e) 106 | 107 | -------------------------------------------------------------------------------- /Students/3-keras/fashion_import.py: -------------------------------------------------------------------------------- 1 | import gzip 2 | import os 3 | 4 | from 
keras.utils.data_utils import get_file 5 | import numpy as np 6 | 7 | 8 | def load_data(): 9 | """Loads the Fashion-MNIST dataset. 10 | 11 | # Returns 12 | Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. 13 | """ 14 | dirname = os.path.join('datasets', 'fashion-mnist') 15 | base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/' 16 | files = ['train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz', 17 | 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'] 18 | 19 | paths = [] 20 | for file in files: 21 | paths.append(get_file(file, origin=base + file, cache_subdir=dirname)) 22 | 23 | with gzip.open(paths[0], 'rb') as lbpath: 24 | y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8) 25 | 26 | with gzip.open(paths[1], 'rb') as imgpath: 27 | x_train = np.frombuffer(imgpath.read(), np.uint8, 28 | offset=16).reshape(len(y_train), 28, 28) 29 | 30 | with gzip.open(paths[2], 'rb') as lbpath: 31 | y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8) 32 | 33 | with gzip.open(paths[3], 'rb') as imgpath: 34 | x_test = np.frombuffer(imgpath.read(), np.uint8, 35 | offset=16).reshape(len(y_test), 28, 28) 36 | 37 | return (x_train, y_train), (x_test, y_test) 38 | -------------------------------------------------------------------------------- /Students/4-cntk-overview/cntk-overview.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/4-cntk-overview/cntk-overview.pptx -------------------------------------------------------------------------------- /Students/5-cntk-cifar10/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/5-cntk-cifar10/common/__init__.py -------------------------------------------------------------------------------- /Students/5-cntk-cifar10/common/params.py: -------------------------------------------------------------------------------- 1 | # Hyperparams 2 | EPOCHS = 10 3 | BATCHSIZE = 64 4 | LR = 0.01 5 | MOMENTUM = 0.9 6 | N_CLASSES = 10 7 | GPU = True -------------------------------------------------------------------------------- /Students/5-cntk-cifar10/common/utils.py: -------------------------------------------------------------------------------- 1 | from sklearn.datasets import fetch_mldata 2 | from sklearn.preprocessing import OneHotEncoder 3 | from sklearn.model_selection import train_test_split 4 | 5 | import numpy as np 6 | import os 7 | import tarfile 8 | import pickle 9 | import subprocess 10 | 11 | 12 | import sys 13 | if sys.version_info.major == 2: 14 | # Backward compatibility with python 2. 
15 | from six.moves import urllib 16 | urlretrieve = urllib.request.urlretrieve 17 | else: 18 | from urllib.request import urlretrieve 19 | 20 | 21 | def read_batch(src): 22 | '''Unpack the pickle files 23 | ''' 24 | with open(src, 'rb') as f: 25 | if sys.version_info.major == 2: 26 | data = pickle.load(f) 27 | else: 28 | data = pickle.load(f, encoding='latin1') 29 | return data 30 | 31 | def download_cifar(src="http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"): 32 | '''Load the training and testing data 33 | ''' 34 | print ('Downloading ' + src) 35 | fname, h = urlretrieve(src, './delete.me') 36 | print ('Done.') 37 | try: 38 | print ('Extracting files...') 39 | with tarfile.open(fname) as tar: 40 | tar.extractall() 41 | print ('Done.') 42 | print ('Preparing train set...') 43 | train_list = [read_batch('./cifar-10-batches-py/data_batch_{0}'.format(i + 1)) for i in range(5)] 44 | x_train = np.concatenate([t['data'] for t in train_list]) 45 | y_train = np.concatenate([t['labels'] for t in train_list]) 46 | print ('Preparing test set...') 47 | tst = read_batch('./cifar-10-batches-py/test_batch') 48 | x_test = tst['data'] 49 | y_test = np.asarray(tst['labels']) 50 | print ('Done.') 51 | finally: 52 | os.remove(fname) 53 | return x_train, x_test, y_train, y_test 54 | 55 | def cifar_for_library(channel_first=True, one_hot=False): 56 | # Raw data 57 | x_train, x_test, y_train, y_test = download_cifar() 58 | # Scale pixel intensity 59 | x_train = x_train/255.0 60 | x_test = x_test/255.0 61 | # Reshape 62 | x_train = x_train.reshape(-1, 3, 32, 32) 63 | x_test = x_test.reshape(-1, 3, 32, 32) 64 | # Channel last 65 | if not channel_first: 66 | x_train = np.swapaxes(x_train, 1, 3) 67 | x_test = np.swapaxes(x_test, 1, 3) 68 | # One-hot encode y 69 | if one_hot: 70 | y_train = np.expand_dims(y_train, axis=-1) 71 | y_test = np.expand_dims(y_test, axis=-1) 72 | enc = OneHotEncoder(categorical_features='all') 73 | fit = enc.fit(y_train) 74 | y_train = fit.transform(y_train).toarray() 75 | y_test = fit.transform(y_test).toarray() 76 | # dtypes 77 | x_train = x_train.astype(np.float32) 78 | x_test = x_test.astype(np.float32) 79 | y_train = y_train.astype(np.int32) 80 | y_test = y_test.astype(np.int32) 81 | return x_train, x_test, y_train, y_test 82 | 83 | def shuffle_data(X, y): 84 | s = np.arange(len(X)) 85 | np.random.shuffle(s) 86 | X = X[s] 87 | y = y[s] 88 | return X, y 89 | 90 | def yield_mb(X, y, batchsize=64, shuffle=False): 91 | assert len(X) == len(y) 92 | if shuffle: 93 | X, y = shuffle_data(X, y) 94 | # Only complete batches are submitted 95 | for i in range(len(X)//batchsize): 96 | yield X[i*batchsize:(i+1)*batchsize], y[i*batchsize:(i+1)*batchsize] 97 | 98 | def get_gpu_name(): 99 | try: 100 | out_str = subprocess.run(["nvidia-smi", "--query-gpu=gpu_name", "--format=csv"], stdout=subprocess.PIPE).stdout 101 | out_list = out_str.decode("utf-8").split('\n') 102 | out_list = out_list[1:-1] 103 | return out_list 104 | except Exception as e: 105 | print(e) 106 | 107 | -------------------------------------------------------------------------------- /Students/6-style-transfer/content.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/6-style-transfer/content.jpg -------------------------------------------------------------------------------- /Students/6-style-transfer/style.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/6-style-transfer/style.jpg -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_28_42_Pro.bboxes.labels.tsv: -------------------------------------------------------------------------------- 1 | avocado 2 | orange 3 | ketchup 4 | onion 5 | eggBox 6 | joghurt 7 | gerkin 8 | pepper 9 | pepper 10 | champagne 11 | orangeJuice 12 | tomato 13 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_28_42_Pro.bboxes.tsv: -------------------------------------------------------------------------------- 1 | 756 411 972 563 2 | 789 593 1000 766 3 | 578 409 749 764 4 | 301 403 455 553 5 | 96 611 524 764 6 | 695 889 893 1183 7 | 420 883 603 1152 8 | 236 991 419 1183 9 | 4 958 234 1206 10 | 778 1267 1044 1651 11 | 526 1271 708 1624 12 | 65 1354 474 1599 13 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_28_42_Pro.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_28_42_Pro.jpg -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_42_36_Pro.bboxes.labels.tsv: -------------------------------------------------------------------------------- 1 | gerkin 2 | tabasco 3 | pepper 4 | avocado 5 | tomato 6 | pepper 7 | orangeJuice 8 | milk 9 | milk 10 | tomato 11 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_42_36_Pro.bboxes.tsv: -------------------------------------------------------------------------------- 1 | 649 340 814 612 2 | 634 505 722 778 3 | 413 618 586 776 4 | 102 628 349 776 5 | 634 1025 904 1194 6 | 386 956 580 1150 7 | 131 954 338 1175 8 | 662 1261 841 1567 9 | 442 1250 626 1557 10 | 156 1425 300 1569 11 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_42_36_Pro.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_42_36_Pro.jpg -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_46_03_Pro.bboxes.labels.tsv: -------------------------------------------------------------------------------- 1 | butter 2 | tomato 3 | tabasco 4 | tomato 5 | avocado 6 | gerkin 7 | water 8 | pepper 9 | eggBox 10 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_46_03_Pro.bboxes.tsv: -------------------------------------------------------------------------------- 1 | 616 680 831 785 2 | 424 
684 566 774 3 | 211 480 290 778 4 | 607 1014 920 1167 5 | 399 1006 549 1183 6 | 84 889 317 1215 7 | 697 1277 948 1592 8 | 336 1284 541 1476 9 | 8 1294 319 1605 10 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_46_03_Pro.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_46_03_Pro.jpg -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_48_26_Pro.bboxes.labels.tsv: -------------------------------------------------------------------------------- 1 | butter 2 | ketchup 3 | milk 4 | orangeJuice 5 | gerkin 6 | joghurt 7 | eggBox 8 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_48_26_Pro.bboxes.tsv: -------------------------------------------------------------------------------- 1 | 607 687 858 793 2 | 200 655 516 781 3 | 657 972 872 1198 4 | 286 972 536 1179 5 | 0 883 207 1194 6 | 415 1244 628 1582 7 | 6 1288 307 1626 8 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_48_26_Pro.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_11_48_26_Pro.jpg -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_12_37_07_Pro.bboxes.labels.tsv: -------------------------------------------------------------------------------- 1 | mustard 2 | butter 3 | orangeJuice 4 | joghurt 5 | ketchup 6 | orange 7 | champagne 8 | water 9 | eggBox 10 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_12_37_07_Pro.bboxes.tsv: -------------------------------------------------------------------------------- 1 | 728 367 856 582 2 | 730 666 1020 781 3 | 246 561 618 783 4 | 803 899 1004 1217 5 | 509 1046 806 1229 6 | 234 977 467 1194 7 | 735 1313 972 1686 8 | 540 1323 733 1667 9 | 190 1294 509 1632 10 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_12_37_07_Pro.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/7-object-detection-frnn/Grocery/testImages/WIN_20160803_12_37_07_Pro.jpg -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/README.md: -------------------------------------------------------------------------------- 1 | # Finding Groceries Using Fast R-CNN in CNTK 2 | 3 | # Introduction 4 | 5 | We use Fast R-CNN to find rough locations and types of groceries in pictures. Please see the `FindingGroceriesInImages.ipynb` Jupyter Notebook for details. 
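
Each test image in `Grocery/testImages` above is paired with a `.bboxes.tsv` file (one box per line, apparently as `x1 y1 x2 y2` pixel coordinates) and a `.bboxes.labels.tsv` file listing the matching class names. A minimal loading sketch (the file stem comes from the test set above):

```python
import numpy as np

stem = "Grocery/testImages/WIN_20160803_11_28_42_Pro"
boxes = np.loadtxt(stem + ".bboxes.tsv", dtype=int)  # shape (num_boxes, 4)
with open(stem + ".bboxes.labels.tsv") as f:
    labels = [line.strip() for line in f]
assert len(boxes) == len(labels)
for (x1, y1, x2, y2), name in zip(boxes, labels):
    print("%-12s (%d, %d) - (%d, %d)" % (name, x1, y1, x2, y2))
```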
6 | 7 | # Prerequisites 8 | 9 | Please create a Python 3.4 environment with the appropriate setup by: 10 | 11 | - Install Anaconda 12 | - Create a new Anaconda environment using the included environment.yml file 13 | - `conda env create --name fastrnn -f environment.yml python=3.4` 14 | - Install `scikit-image` and `opencv` using the pre-built Wheel files 15 | 16 | ## C Library code 17 | 18 | The Fast R-CNN implementation for CNTK depends on custom C code from the original Fast R-CNN [GitHub repo](https://github.com/rbgirshick/fast-rcnn) which has been built for 64-bit Windows and Python 3.4. In theory, building this code for other versions of Python and other operating systems is possible, but I have yet to do so. Once again, if you find yourself doing so, please submit a pull request as I'd love to extend this beyond just Windows. 19 | 20 | # Appendix 21 | 22 | ## Why Fast R-CNN? 23 | 24 | As you know if you watch the Deep Learning space, Fast R-CNN is _far_ from state-of-the-art for the Object Detection problem. This was _not_ true when we were working with our partner - we chose Fast R-CNN because the CNTK team had a beta version of their current [Example](https://github.com/Microsoft/CNTK/tree/master/Examples/Image/Detection/FastRCNN) and at the time getting the pipeline in place was more important than implementing Faster R-CNN (the best at the time) from scratch. We also knew that even Faster R-CNN's edge would erode quickly in this space, so getting them up and running and able to experiment as new techniques came along was more important. 25 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/environment.yml: -------------------------------------------------------------------------------- 1 | channels: 2 | - defaults 3 | dependencies: 4 | - mkl=2017.0.3=0 5 | - numpy=1.11.3=py34_0 6 | - pip=9.0.1=py34_1 7 | - python=3.4.5=0 8 | - wheel=0.29.0=py34_0 9 | - pip: 10 | - backports-abc==0.5 11 | - bleach==2.0.0 12 | - https://cntk.ai/PythonWheel/GPU-1bit-SGD/cntk-2.1-cp34-cp34m-linux_x86_64.whl 13 | - colorama==0.3.9 14 | - cycler==0.10.0 15 | - decorator==4.1.2 16 | - easydict==1.7 17 | - entrypoints==0.2.3 18 | - future==0.16.0 19 | - html5lib==0.999999999 20 | - ipykernel==4.6.1 21 | - ipython==6.1.0 22 | - ipython-genutils==0.2.0 23 | - ipywidgets==6.0.0 24 | - jedi==0.10.2 25 | - jinja2==2.9.6 26 | - jsonschema==2.6.0 27 | - jupyter==1.0.0 28 | - jupyter-client==5.1.0 29 | - jupyter-console==5.1.0 30 | - jupyter-core==4.3.0 31 | - markupsafe==1.0 32 | - matplotlib==2.0.2 33 | - mistune==0.7.4 34 | - nbconvert==5.2.1 35 | - nbformat==4.3.0 36 | - networkx==1.11 37 | - notebook==5.0.0 38 | - olefile==0.44 39 | - opencv-python==3.3.0.10 40 | - pandocfilters==1.4.2 41 | - pickleshare==0.7.4 42 | - pillow==4.2.1 43 | - prompt-toolkit==1.0.15 44 | - pygments==2.2.0 45 | - pyparsing==2.2.0 46 | - python-dateutil==2.6.1 47 | - pytz==2017.2 48 | - pywavelets==0.5.2 49 | - pyzmq==16.0.2 50 | - qtconsole==4.3.1 51 | - scikit-image==0.13.0 52 | - scikit-learn==0.19.0 53 | - simplegeneric==0.8.1 54 | - six==1.10.0 55 | - testpath==0.3.1 56 | - tornado==4.5.1 57 | - traitlets==4.3.2 58 | - typing==3.6.2 59 | - wcwidth==0.1.7 60 | - webencodings==0.5.1 61 | - widgetsnbextension==2.0.0 62 | - win-unicode-console==0.5 63 | - xmltodict==0.11.0 64 | 65 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/fastRCNN/__init__.py: 
-------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | from .imdb import imdb 8 | from .pascal_voc import pascal_voc 9 | 10 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/fastRCNN/imdb.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | 8 | import os 9 | import os.path as osp 10 | import PIL 11 | import numpy as np 12 | import scipy.sparse 13 | from builtins import range 14 | 15 | import sys 16 | from .utils.cython_bbox import bbox_overlaps 17 | 18 | class imdb(object): 19 | """Image database.""" 20 | 21 | def __init__(self, name): 22 | self._name = name 23 | self._num_classes = 0 24 | self._classes = [] 25 | self._image_index = [] 26 | self._obj_proposer = 'selective_search' 27 | self._roidb = None 28 | self._roidb_handler = self.default_roidb 29 | # Use this dict for storing dataset specific config options 30 | self.config = {} 31 | 32 | @property 33 | def name(self): 34 | return self._name 35 | 36 | @property 37 | def num_classes(self): 38 | return len(self._classes) 39 | 40 | @property 41 | def classes(self): 42 | return self._classes 43 | 44 | @property 45 | def image_index(self): 46 | return self._image_index 47 | 48 | @property 49 | def roidb_handler(self): 50 | return self._roidb_handler 51 | 52 | @roidb_handler.setter 53 | def roidb_handler(self, val): 54 | self._roidb_handler = val 55 | 56 | @property 57 | def roidb(self): 58 | # A roidb is a list of dictionaries, each with the following keys: 59 | # boxes 60 | # gt_overlaps 61 | # gt_classes 62 | # flipped 63 | if self._roidb is not None: 64 | return self._roidb 65 | self._roidb = self.roidb_handler() 66 | return self._roidb 67 | 68 | # @property 69 | # def cache_path(self): 70 | # cache_path = osp.abspath(osp.join(datasets.ROOT_DIR, 'data', 'cache')) 71 | # print cache_path 72 | # if not os.path.exists(cache_path): 73 | # os.makedirs(cache_path) 74 | # return cache_path 75 | 76 | @property 77 | def num_images(self): 78 | return len(self.image_index) 79 | 80 | def image_path_at(self, i): 81 | raise NotImplementedError 82 | 83 | def default_roidb(self): 84 | raise NotImplementedError 85 | 86 | def evaluate_detections(self, all_boxes, output_dir=None): 87 | """ 88 | all_boxes is a list of length number-of-classes. 89 | Each list element is a list of length number-of-images. 90 | Each of those list elements is either an empty list [] 91 | or a numpy array of detection. 
92 | 93 | all_boxes[class][image] = [] or np.array of shape #dets x 5 94 | """ 95 | raise NotImplementedError 96 | 97 | def append_flipped_images(self): 98 | num_images = self.num_images 99 | widths = [PIL.Image.open(self.image_path_at(i)).size[0] 100 | for i in range(num_images)] 101 | for i in range(num_images): 102 | boxes = self.roidb[i]['boxes'].copy() 103 | oldx1 = boxes[:, 0].copy() 104 | oldx2 = boxes[:, 2].copy() 105 | boxes[:, 0] = widths[i] - oldx2 - 1 106 | boxes[:, 2] = widths[i] - oldx1 - 1 107 | assert (boxes[:, 2] >= boxes[:, 0]).all() 108 | entry = {'boxes' : boxes, 109 | 'gt_overlaps' : self.roidb[i]['gt_overlaps'], 110 | 'gt_classes' : self.roidb[i]['gt_classes'], 111 | 'flipped' : True} 112 | self.roidb.append(entry) 113 | self._image_index = self._image_index * 2 114 | 115 | def evaluate_recall(self, candidate_boxes, ar_thresh=0.5): 116 | # Record max overlap value for each gt box 117 | # Return vector of overlap values 118 | gt_overlaps = np.zeros(0) 119 | for i in range(self.num_images): 120 | gt_inds = np.where(self.roidb[i]['gt_classes'] > 0)[0] 121 | gt_boxes = self.roidb[i]['boxes'][gt_inds, :] 122 | 123 | boxes = candidate_boxes[i] 124 | if boxes.shape[0] == 0: 125 | continue 126 | overlaps = bbox_overlaps(boxes.astype(np.float), 127 | gt_boxes.astype(np.float)) 128 | 129 | # gt_overlaps = np.hstack((gt_overlaps, overlaps.max(axis=0))) 130 | _gt_overlaps = np.zeros((gt_boxes.shape[0])) 131 | for j in range(gt_boxes.shape[0]): 132 | argmax_overlaps = overlaps.argmax(axis=0) 133 | max_overlaps = overlaps.max(axis=0) 134 | gt_ind = max_overlaps.argmax() 135 | gt_ovr = max_overlaps.max() 136 | assert(gt_ovr >= 0) 137 | box_ind = argmax_overlaps[gt_ind] 138 | _gt_overlaps[j] = overlaps[box_ind, gt_ind] 139 | assert(_gt_overlaps[j] == gt_ovr) 140 | overlaps[box_ind, :] = -1 141 | overlaps[:, gt_ind] = -1 142 | 143 | gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps)) 144 | 145 | num_pos = gt_overlaps.size 146 | gt_overlaps = np.sort(gt_overlaps) 147 | step = 0.001 148 | thresholds = np.minimum(np.arange(0.5, 1.0 + step, step), 1.0) 149 | recalls = np.zeros_like(thresholds) 150 | for i, t in enumerate(thresholds): 151 | recalls[i] = (gt_overlaps >= t).sum() / float(num_pos) 152 | ar = 2 * np.trapz(recalls, thresholds) 153 | 154 | return ar, gt_overlaps, recalls, thresholds 155 | 156 | def create_roidb_from_box_list(self, box_list, gt_roidb): 157 | assert len(box_list) == self.num_images, \ 158 | 'Number of boxes must match number of ground-truth images' 159 | roidb = [] 160 | for i in range(self.num_images): 161 | boxes = box_list[i] 162 | num_boxes = boxes.shape[0] 163 | overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32) 164 | 165 | if gt_roidb and gt_roidb[i]: 166 | gt_boxes = gt_roidb[i]['boxes'] 167 | gt_classes = gt_roidb[i]['gt_classes'] 168 | if len(gt_classes) > 0: #for pascal every image has at least one annotated object. 
This is not the case, however, when negative images are included.
169 |                     gt_overlaps = bbox_overlaps(boxes.astype(np.float),
170 |                                                 gt_boxes.astype(np.float))
171 | 
172 |                     argmaxes = gt_overlaps.argmax(axis=1)
173 |                     maxes = gt_overlaps.max(axis=1)
174 |                     I = np.where(maxes > 0)[0]
175 |                     overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
176 | 
177 |             overlaps = scipy.sparse.csr_matrix(overlaps)
178 |             roidb.append({'boxes' : boxes,
179 |                           'gt_classes' : np.zeros((num_boxes,),
180 |                                                   dtype=np.int32),
181 |                           'gt_overlaps' : overlaps,
182 |                           'flipped' : False})
183 |         return roidb
184 | 
185 |     @staticmethod
186 |     def merge_roidbs(a, b):
187 |         assert len(a) == len(b)
188 |         for i in range(len(a)):
189 |             if a[i]: # if the image has at least one annotated object
190 |                 a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
191 |                 a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
192 |                                                 b[i]['gt_classes']))
193 |                 a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
194 |                                                            b[i]['gt_overlaps']])
195 |             else:
196 |                 a[i] = b[i]
197 |         return a
198 | 
199 |     def competition_mode(self, on):
200 |         """Turn competition mode on or off."""
201 |         pass
202 | 
--------------------------------------------------------------------------------
/Students/7-object-detection-frnn/fastRCNN/nms.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 | 
8 | import numpy as np
9 | 
10 | def nms(dets, thresh):  # greedy non-maximum suppression; dets is an N x 5 array of [x1, y1, x2, y2, score]
11 |     x1 = dets[:, 0]
12 |     y1 = dets[:, 1]
13 |     x2 = dets[:, 2]
14 |     y2 = dets[:, 3]
15 |     scores = dets[:, 4]
16 | 
17 |     areas = (x2 - x1 + 1) * (y2 - y1 + 1)  # box areas (inclusive pixel coordinates)
18 |     order = scores.argsort()[::-1]  # box indices sorted by descending score
19 | 
20 |     keep = []
21 |     while order.size > 0:
22 |         i = order[0]  # the highest-scoring box still in play is always kept
23 |         keep.append(i)
24 |         xx1 = np.maximum(x1[i], x1[order[1:]])
25 |         yy1 = np.maximum(y1[i], y1[order[1:]])
26 |         xx2 = np.minimum(x2[i], x2[order[1:]])
27 |         yy2 = np.minimum(y2[i], y2[order[1:]])
28 | 
29 |         w = np.maximum(0.0, xx2 - xx1 + 1)
30 |         h = np.maximum(0.0, yy2 - yy1 + 1)
31 |         inter = w * h
32 |         ovr = inter / (areas[i] + areas[order[1:]] - inter)  # IoU of the kept box with the remaining boxes
33 | 
34 |         inds = np.where(ovr <= thresh)[0]  # survivors overlap the kept box by at most thresh
35 |         order = order[inds + 1]  # +1 because ovr was computed against order[1:]
36 | 
37 |     return keep
--------------------------------------------------------------------------------
/Students/7-object-detection-frnn/fastRCNN/timer.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 | 
8 | import time
9 | 
10 | class Timer(object):
11 |     """A simple timer."""
12 |     def __init__(self):
13 |         self.total_time = 0.
14 |         self.calls = 0
15 |         self.start_time = 0.
16 |         self.diff = 0.
17 |         self.average_time = 0.
18 | 
19 |     def tic(self):
20 |         # using time.time instead of time.clock because time.clock
21 |         # does not normalize for multithreading
22 |         self.start_time = time.time()
23 | 
24 |     def toc(self, average=True):
25 |         self.diff = time.time() - self.start_time
26 |         self.total_time += self.diff
27 |         self.calls += 1
28 |         self.average_time = self.total_time / self.calls
29 |         if average:
30 |             return self.average_time
31 |         else:
32 |             return self.diff
33 | 
--------------------------------------------------------------------------------
/Students/7-object-detection-frnn/fastRCNN/utils/cython_bbox.pyd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/7-object-detection-frnn/fastRCNN/utils/cython_bbox.pyd
--------------------------------------------------------------------------------
/Students/7-object-detection-frnn/fastRCNN/utils/cython_bbox.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/7-object-detection-frnn/fastRCNN/utils/cython_bbox.so
--------------------------------------------------------------------------------
/Students/7-object-detection-frnn/fastRCNN/utils/cython_nms.pyd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/7-object-detection-frnn/fastRCNN/utils/cython_nms.pyd
--------------------------------------------------------------------------------
/Students/7-object-detection-frnn/fastRCNN/utils/cython_nms.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/7-object-detection-frnn/fastRCNN/utils/cython_nms.so
--------------------------------------------------------------------------------
/Students/7-object-detection-frnn/fastRCNN/voc_eval.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast/er R-CNN
3 | # Licensed under The MIT License [see LICENSE for details]
4 | # Written by Bharath Hariharan
5 | # --------------------------------------------------------
6 | 
7 | from __future__ import print_function
8 | import xml.etree.ElementTree as ET
9 | import os
10 | import pickle as cp
11 | import numpy as np
12 | 
13 | def parse_rec(filename):
14 |     """ Parse a PASCAL VOC xml file """
15 |     tree = ET.parse(filename)
16 |     objects = []
17 |     for obj in tree.findall('object'):
18 |         obj_struct = {}
19 |         obj_struct['name'] = obj.find('name').text
20 |         obj_struct['pose'] = obj.find('pose').text
21 |         obj_struct['truncated'] = int(obj.find('truncated').text)
22 |         obj_struct['difficult'] = int(obj.find('difficult').text)
23 |         bbox = obj.find('bndbox')
24 |         obj_struct['bbox'] = [int(bbox.find('xmin').text),
25 |                               int(bbox.find('ymin').text),
26 |                               int(bbox.find('xmax').text),
27 |                               int(bbox.find('ymax').text)]
28 |         objects.append(obj_struct)
29 | 
30 |     return objects
31 | 
32 | def voc_ap(rec, prec, use_07_metric=False):
33 |     """ ap = voc_ap(rec, prec, [use_07_metric])
34 |     Compute VOC AP given precision and recall.
35 | If use_07_metric is true, uses the 36 | VOC 07 11 point method (default:False). 37 | """ 38 | if use_07_metric: 39 | # 11 point metric 40 | ap = 0. 41 | for t in np.arange(0., 1.1, 0.1): 42 | if np.sum(rec >= t) == 0: 43 | p = 0 44 | else: 45 | p = np.max(prec[rec >= t]) 46 | ap = ap + p / 11. 47 | else: 48 | # correct AP calculation 49 | # first append sentinel values at the end 50 | mrec = np.concatenate(([0.], rec, [1.])) 51 | mpre = np.concatenate(([0.], prec, [0.])) 52 | 53 | # compute the precision envelope 54 | for i in range(mpre.size - 1, 0, -1): 55 | mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) 56 | 57 | # to calculate area under PR curve, look for points 58 | # where X axis (recall) changes value 59 | i = np.where(mrec[1:] != mrec[:-1])[0] 60 | 61 | # and sum (\Delta recall) * prec 62 | ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) 63 | return ap 64 | 65 | def voc_eval(detpath, 66 | annopath, 67 | imagesetfile, 68 | classname, 69 | cachedir, 70 | ovthresh=0.5, 71 | use_07_metric=False): 72 | """rec, prec, ap = voc_eval(detpath, 73 | annopath, 74 | imagesetfile, 75 | classname, 76 | [ovthresh], 77 | [use_07_metric]) 78 | 79 | Top level function that does the PASCAL VOC evaluation. 80 | 81 | detpath: Path to detections 82 | detpath.format(classname) should produce the detection results file. 83 | annopath: Path to annotations 84 | annopath.format(imagename) should be the xml annotations file. 85 | imagesetfile: Text file containing the list of images, one image per line. 86 | classname: Category name (duh) 87 | cachedir: Directory for caching the annotations 88 | [ovthresh]: Overlap threshold (default = 0.5) 89 | [use_07_metric]: Whether to use VOC07's 11 point AP computation 90 | (default False) 91 | """ 92 | # assumes detections are in detpath.format(classname) 93 | # assumes annotations are in annopath.format(imagename) 94 | # assumes imagesetfile is a text file with each line an image name 95 | # cachedir caches the annotations in a pickle file 96 | 97 | # first load gt 98 | if cachedir: 99 | if not os.path.isdir(cachedir): 100 | os.mkdir(cachedir) 101 | cachefile = os.path.join(cachedir, 'annots.pkl') 102 | # read list of images 103 | with open(imagesetfile, 'r') as f: 104 | lines = f.readlines() 105 | imagenames = [x.strip() for x in lines] 106 | 107 | if not cachedir or not os.path.isfile(cachefile): 108 | # load annots 109 | recs = {} 110 | for i, imagename in enumerate(imagenames): 111 | recs[imagename] = parse_rec(annopath.format(imagename)) 112 | if i % 1000 == 0: 113 | print ('Reading annotation for {:d}/{:d}'.format( 114 | i + 1, len(imagenames))) 115 | # save 116 | if cachedir: 117 | print ('Saving cached annotations to {:s}'.format(cachefile)) 118 | with open(cachefile, 'wb') as f: 119 | cp.dump(recs, f) 120 | else: 121 | # load 122 | with open(cachefile, 'rb') as f: 123 | recs = cp.load(f) 124 | 125 | # extract gt objects for this class 126 | class_recs = {} 127 | npos = 0 128 | for imagename in imagenames: 129 | R = [obj for obj in recs[imagename] if obj['name'] == classname] 130 | bbox = np.array([x['bbox'] for x in R]) 131 | difficult = np.array([x['difficult'] for x in R]).astype(np.bool) 132 | det = [False] * len(R) 133 | npos = npos + sum(~difficult) 134 | class_recs[imagename] = {'bbox': bbox, 135 | 'difficult': difficult, 136 | 'det': det} 137 | 138 | # read dets 139 | detfile = detpath.format(classname) 140 | with open(detfile, 'r') as f: 141 | lines = f.readlines() 142 | 143 | splitlines = [x.strip().split(' ') for x in lines] 144 | image_ids 
= [x[0] for x in splitlines]
145 |     confidence = np.array([float(x[1]) for x in splitlines])
146 |     BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
147 | 
148 |     # sort by confidence
149 |     sorted_ind = np.argsort(-confidence)
150 |     sorted_scores = np.sort(-confidence)
151 | 
152 |     BB = BB[sorted_ind, :]
153 |     image_ids = [image_ids[x] for x in sorted_ind]
154 | 
155 |     # go down dets and mark TPs and FPs
156 |     nd = len(image_ids)
157 |     tp = np.zeros(nd)
158 |     fp = np.zeros(nd)
159 |     for d in range(nd):
160 |         R = class_recs[image_ids[d]]
161 |         bb = BB[d, :].astype(float)
162 |         ovmax = -np.inf
163 |         BBGT = R['bbox'].astype(float)
164 | 
165 |         if BBGT.size > 0:
166 |             # compute overlaps
167 |             # intersection
168 |             ixmin = np.maximum(BBGT[:, 0], bb[0])
169 |             iymin = np.maximum(BBGT[:, 1], bb[1])
170 |             ixmax = np.minimum(BBGT[:, 2], bb[2])
171 |             iymax = np.minimum(BBGT[:, 3], bb[3])
172 |             iw = np.maximum(ixmax - ixmin + 1., 0.)
173 |             ih = np.maximum(iymax - iymin + 1., 0.)
174 |             inters = iw * ih
175 | 
176 |             # union
177 |             uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
178 |                    (BBGT[:, 2] - BBGT[:, 0] + 1.) *
179 |                    (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
180 | 
181 |             overlaps = inters / uni
182 |             ovmax = np.max(overlaps)
183 |             jmax = np.argmax(overlaps)
184 | 
185 |         if ovmax > ovthresh:
186 |             if not R['difficult'][jmax]:
187 |                 if not R['det'][jmax]:
188 |                     tp[d] = 1.
189 |                     R['det'][jmax] = 1
190 |                 else:
191 |                     fp[d] = 1.
192 |         else:
193 |             fp[d] = 1.
194 | 
195 |     # compute precision recall
196 |     fp = np.cumsum(fp)
197 |     tp = np.cumsum(tp)
198 |     rec = tp / float(npos)
199 |     # avoid divide by zero in case the first detection matches a difficult
200 |     # ground truth
201 |     prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
202 |     ap = voc_ap(rec, prec, use_07_metric)
203 | 
204 |     return rec, prec, ap
205 | 
--------------------------------------------------------------------------------
/Students/7-object-detection-frnn/selectivesearch/README.md:
--------------------------------------------------------------------------------
1 | # Selective Search
2 | 
3 | This code is a revision of the selective search implementation at
4 | https://github.com/AlpacaDB/selectivesearch, updated so that it works in a Python 3 environment. (A hedged usage sketch follows below.)
5 | 
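For orientation, here is a minimal, hedged usage sketch. It assumes the interface matches the upstream AlpacaDB implementation, i.e. a `selective_search(img, scale, sigma, min_size)` function returning a labelled image plus a list of region dicts with `'rect'` (x, y, w, h) and `'size'` keys; `skimage.data.astronaut()` is just a stand-in test image, not part of this lab.

```python
# Hedged sketch: generate region proposals with this package,
# assuming the upstream AlpacaDB interface described above.
import skimage.data
from selectivesearch import selective_search

img = skimage.data.astronaut()  # any RGB numpy array will do
img_lbl, regions = selective_search(img, scale=500, sigma=0.9, min_size=10)

# Keep reasonably large, de-duplicated proposals.
candidates = set(r['rect'] for r in regions if r['size'] > 2000)
print(len(candidates), "region proposals")
```

6 | This file is based on or incorporates material from the projects listed below (Third Party OSS). The original copyright notice and the license under which Microsoft received such Third Party OSS, are set forth below. Such licenses and notices are provided for informational purposes only. Microsoft licenses the Third Party OSS to you under the licensing terms for the Microsoft product or service. Microsoft reserves all other rights not expressly granted under this agreement, whether by implication, estoppel or otherwise.
7 | 
8 | `alpacadb-selectivesearch`
9 | Copyright (c) 2015-2016 AlpacaDB
10 | Copyright (c) 2016 Oussama ENNAFII
11 | 
12 | Provided for Informational Purposes Only
13 | 
14 | MIT License
15 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the Software), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
16 | 
17 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.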
18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 20 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /Students/7-object-detection-frnn/selectivesearch/__init__.py: -------------------------------------------------------------------------------- 1 | from .selectivesearch import selective_search 2 | 3 | -------------------------------------------------------------------------------- /Students/9-imdb/LSTM_Keras_CNTK_IMDB.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# High-level LSTM Keras (CNTK) Example" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [ 15 | { 16 | "name": "stderr", 17 | "output_type": "stream", 18 | "text": [ 19 | "Using CNTK backend\n" 20 | ] 21 | } 22 | ], 23 | "source": [ 24 | "import os\n", 25 | "import sys\n", 26 | "import numpy as np\n", 27 | "os.environ['KERAS_BACKEND'] = \"cntk\"\n", 28 | "import keras as K\n", 29 | "import cntk\n", 30 | "from keras.models import Sequential\n", 31 | "from keras.layers import Dense, Embedding, GRU\n", 32 | "from common.params_lstm import *\n", 33 | "from common.utils import *" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 2, 39 | "metadata": {}, 40 | "outputs": [ 41 | { 42 | "name": "stdout", 43 | "output_type": "stream", 44 | "text": [ 45 | "OS: linux\n", 46 | "Python: 3.5.2 |Anaconda custom (64-bit)| (default, Jul 2 2016, 17:53:06) \n", 47 | "[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]\n", 48 | "Keras: 2.0.8\n", 49 | "Numpy: 1.13.1\n", 50 | "CNTK: 2.1\n", 51 | "cntk\n" 52 | ] 53 | } 54 | ], 55 | "source": [ 56 | "print(\"OS: \", sys.platform)\n", 57 | "print(\"Python: \", sys.version)\n", 58 | "print(\"Keras: \", K.__version__)\n", 59 | "print(\"Numpy: \", np.__version__)\n", 60 | "print(\"CNTK: \", cntk.__version__)\n", 61 | "print(K.backend.backend())" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": 3, 67 | "metadata": { 68 | "collapsed": true 69 | }, 70 | "outputs": [], 71 | "source": [ 72 | "def create_symbol():\n", 73 | " model = Sequential()\n", 74 | " model.add(Embedding(MAXFEATURES, EMBEDSIZE, input_length=MAXLEN))\n", 75 | " model.add(GRU(NUMHIDDEN))\n", 76 | " model.add(Dense(2, activation='softmax'))\n", 77 | " return model" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 4, 83 | "metadata": { 84 | "collapsed": true 85 | }, 86 | "outputs": [], 87 | "source": [ 88 | "def init_model(m):\n", 89 | " m.compile(\n", 90 | " loss = \"categorical_crossentropy\",\n", 91 | " optimizer = K.optimizers.Adam(LR, BETA_1, BETA_2, EPS),\n", 92 | " metrics = ['accuracy'])\n", 93 | " return m" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 5, 99 | "metadata": {}, 100 | "outputs": [ 101 | { 102 | "name": "stdout", 103 | "output_type": "stream", 104 | "text": [ 105 | "Downloading https://s3.amazonaws.com/text-datasets/imdb.npz\n", 106 | "Done.\n", 107 | "Extracting files...\n", 108 | "Done.\n", 109 | 
"Trimming to 25000 max-features\n", 110 | "Padding to length 150\n", 111 | "(25000, 150) (25000, 150) (25000, 2) (25000, 2)\n", 112 | "int32 int32 int32 int32\n", 113 | "CPU times: user 5.9 s, sys: 260 ms, total: 6.16 s\n", 114 | "Wall time: 7.27 s\n" 115 | ] 116 | } 117 | ], 118 | "source": [ 119 | "%%time\n", 120 | "# Data into format for library\n", 121 | "x_train, x_test, y_train, y_test = imdb_for_library(seq_len=MAXLEN, max_features=MAXFEATURES, one_hot=True)\n", 122 | "print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)\n", 123 | "print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype)" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 6, 129 | "metadata": {}, 130 | "outputs": [ 131 | { 132 | "name": "stdout", 133 | "output_type": "stream", 134 | "text": [ 135 | "CPU times: user 240 ms, sys: 608 ms, total: 848 ms\n", 136 | "Wall time: 613 ms\n" 137 | ] 138 | } 139 | ], 140 | "source": [ 141 | "%%time\n", 142 | "# Load symbol\n", 143 | "sym = create_symbol()" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 7, 149 | "metadata": {}, 150 | "outputs": [ 151 | { 152 | "name": "stdout", 153 | "output_type": "stream", 154 | "text": [ 155 | "CPU times: user 8 ms, sys: 40 ms, total: 48 ms\n", 156 | "Wall time: 10.1 ms\n" 157 | ] 158 | } 159 | ], 160 | "source": [ 161 | "%%time\n", 162 | "# Initialise model\n", 163 | "model = init_model(sym)" 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": 8, 169 | "metadata": {}, 170 | "outputs": [ 171 | { 172 | "name": "stdout", 173 | "output_type": "stream", 174 | "text": [ 175 | "_________________________________________________________________\n", 176 | "Layer (type) Output Shape Param # \n", 177 | "=================================================================\n", 178 | "embedding_1 (Embedding) (None, 150, 150) 3750000 \n", 179 | "_________________________________________________________________\n", 180 | "gru_1 (GRU) (None, 100) 75300 \n", 181 | "_________________________________________________________________\n", 182 | "dense_1 (Dense) (None, 2) 202 \n", 183 | "=================================================================\n", 184 | "Total params: 3,825,502\n", 185 | "Trainable params: 3,825,502\n", 186 | "Non-trainable params: 0\n", 187 | "_________________________________________________________________\n" 188 | ] 189 | } 190 | ], 191 | "source": [ 192 | "model.summary()" 193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "execution_count": 9, 198 | "metadata": {}, 199 | "outputs": [ 200 | { 201 | "name": "stdout", 202 | "output_type": "stream", 203 | "text": [ 204 | "Epoch 1/3\n", 205 | "25000/25000 [==============================] - 74s - loss: 0.4745 - acc: 0.7541 \n", 206 | "Epoch 2/3\n", 207 | "25000/25000 [==============================] - 74s - loss: 0.2188 - acc: 0.9179 - ETA: 5s -\n", 208 | "Epoch 3/3\n", 209 | "25000/25000 [==============================] - 74s - loss: 0.1189 - acc: 0.9606 \n", 210 | "CPU times: user 3min 18s, sys: 26.3 s, total: 3min 44s\n", 211 | "Wall time: 3min 43s\n" 212 | ] 213 | }, 214 | { 215 | "data": { 216 | "text/plain": [ 217 | "" 218 | ] 219 | }, 220 | "execution_count": 9, 221 | "metadata": {}, 222 | "output_type": "execute_result" 223 | } 224 | ], 225 | "source": [ 226 | "%%time\n", 227 | "# Train model\n", 228 | "model.fit(x_train,\n", 229 | " y_train,\n", 230 | " batch_size=BATCHSIZE,\n", 231 | " epochs=EPOCHS,\n", 232 | " verbose=1)" 233 | ] 234 | }, 235 | { 236 | "cell_type": "code", 237 
| "execution_count": 10, 238 | "metadata": {}, 239 | "outputs": [ 240 | { 241 | "name": "stdout", 242 | "output_type": "stream", 243 | "text": [ 244 | "CPU times: user 30.7 s, sys: 4.42 s, total: 35.1 s\n", 245 | "Wall time: 35.2 s\n" 246 | ] 247 | } 248 | ], 249 | "source": [ 250 | "%%time\n", 251 | "y_guess = model.predict(x_test, batch_size=BATCHSIZE)\n", 252 | "y_guess = np.argmax(y_guess, axis=-1)\n", 253 | "y_truth = np.argmax(y_test, axis=-1)" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": 11, 259 | "metadata": {}, 260 | "outputs": [ 261 | { 262 | "name": "stdout", 263 | "output_type": "stream", 264 | "text": [ 265 | "Accuracy: 0.85932\n" 266 | ] 267 | } 268 | ], 269 | "source": [ 270 | "print(\"Accuracy: \", sum(y_guess == y_truth)/len(y_guess))" 271 | ] 272 | } 273 | ], 274 | "metadata": { 275 | "anaconda-cloud": {}, 276 | "kernelspec": { 277 | "display_name": "Python 3", 278 | "language": "python", 279 | "name": "python3" 280 | }, 281 | "language_info": { 282 | "codemirror_mode": { 283 | "name": "ipython", 284 | "version": 3 285 | }, 286 | "file_extension": ".py", 287 | "mimetype": "text/x-python", 288 | "name": "python", 289 | "nbconvert_exporter": "python", 290 | "pygments_lexer": "ipython3", 291 | "version": "3.5.2" 292 | } 293 | }, 294 | "nbformat": 4, 295 | "nbformat_minor": 2 296 | } 297 | -------------------------------------------------------------------------------- /Students/9-imdb/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/Students/9-imdb/common/__init__.py -------------------------------------------------------------------------------- /Students/9-imdb/common/params_lstm.py: -------------------------------------------------------------------------------- 1 | # Hyperparams LSTM 2 | EPOCHS=3 3 | BATCHSIZE=64 4 | EMBEDSIZE=125 5 | NUMHIDDEN=100 6 | DROPOUT=0.2 7 | LR=0.001 8 | BETA_1=0.9 9 | BETA_2=0.999 10 | EPS=1e-08 11 | MAXLEN=150 12 | MAXFEATURES=20000 13 | GPU=True -------------------------------------------------------------------------------- /Students/9-imdb/common/utils.py: -------------------------------------------------------------------------------- 1 | from sklearn.datasets import fetch_mldata 2 | from sklearn.preprocessing import OneHotEncoder 3 | from sklearn.model_selection import train_test_split 4 | 5 | import numpy as np 6 | import os 7 | import tarfile 8 | import pickle 9 | import subprocess 10 | import sys 11 | if sys.version_info.major == 2: 12 | # Backward compatibility with python 2. 
13 | from six.moves import urllib 14 | urlretrieve = urllib.request.urlretrieve 15 | else: 16 | from urllib.request import urlretrieve 17 | 18 | 19 | def get_gpu_name(): 20 | try: 21 | out_str = subprocess.run(["nvidia-smi", "--query-gpu=gpu_name", "--format=csv"], stdout=subprocess.PIPE).stdout 22 | out_list = out_str.decode("utf-8").split('\n') 23 | out_list = out_list[1:-1] 24 | return out_list 25 | except Exception as e: 26 | print(e) 27 | 28 | def read_batch(src): 29 | '''Unpack the pickle files 30 | ''' 31 | with open(src, 'rb') as f: 32 | if sys.version_info.major == 2: 33 | data = pickle.load(f) 34 | else: 35 | data = pickle.load(f, encoding='latin1') 36 | return data 37 | 38 | def shuffle_data(X, y): 39 | s = np.arange(len(X)) 40 | np.random.shuffle(s) 41 | X = X[s] 42 | y = y[s] 43 | return X, y 44 | 45 | def yield_mb(X, y, batchsize=64, shuffle=False): 46 | assert len(X) == len(y) 47 | if shuffle: 48 | X, y = shuffle_data(X, y) 49 | # Only complete batches are submitted 50 | for i in range(len(X)//batchsize): 51 | yield X[i*batchsize:(i+1)*batchsize], y[i*batchsize:(i+1)*batchsize] 52 | 53 | def download_cifar(src="http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"): 54 | '''Load the training and testing data 55 | ''' 56 | # FLAG: should we host this on azure? 57 | print ('Downloading ' + src) 58 | fname, h = urlretrieve(src, './delete.me') 59 | print ('Done.') 60 | try: 61 | print ('Extracting files...') 62 | with tarfile.open(fname) as tar: 63 | tar.extractall() 64 | print ('Done.') 65 | print ('Preparing train set...') 66 | train_list = [read_batch('./cifar-10-batches-py/data_batch_{0}'.format(i + 1)) for i in range(5)] 67 | x_train = np.concatenate([t['data'] for t in train_list]) 68 | y_train = np.concatenate([t['labels'] for t in train_list]) 69 | print ('Preparing test set...') 70 | tst = read_batch('./cifar-10-batches-py/test_batch') 71 | x_test = tst['data'] 72 | y_test = np.asarray(tst['labels']) 73 | print ('Done.') 74 | finally: 75 | os.remove(fname) 76 | return x_train, x_test, y_train, y_test 77 | 78 | def download_imdb(src="https://s3.amazonaws.com/text-datasets/imdb.npz"): 79 | '''Load the training and testing data 80 | ''' 81 | # FLAG: should we host this on azure? 
82 | print ('Downloading ' + src) 83 | fname, h = urlretrieve(src, './delete.me') 84 | print ('Done.') 85 | try: 86 | print ('Extracting files...') 87 | with np.load(fname) as f: 88 | x_train, y_train = f['x_train'], f['y_train'] 89 | x_test, y_test = f['x_test'], f['y_test'] 90 | print ('Done.') 91 | finally: 92 | os.remove(fname) 93 | return x_train, x_test, y_train, y_test 94 | 95 | def cifar_for_library(channel_first=True, one_hot=False): 96 | # Raw data 97 | x_train, x_test, y_train, y_test = download_cifar() 98 | # Scale pixel intensity 99 | x_train = x_train/255.0 100 | x_test = x_test/255.0 101 | # Reshape 102 | x_train = x_train.reshape(-1, 3, 32, 32) 103 | x_test = x_test.reshape(-1, 3, 32, 32) 104 | # Channel last 105 | if not channel_first: 106 | x_train = np.swapaxes(x_train, 1, 3) 107 | x_test = np.swapaxes(x_test, 1, 3) 108 | # One-hot encode y 109 | if one_hot: 110 | y_train = np.expand_dims(y_train, axis=-1) 111 | y_test = np.expand_dims(y_test, axis=-1) 112 | enc = OneHotEncoder(categorical_features='all') 113 | fit = enc.fit(y_train) 114 | y_train = fit.transform(y_train).toarray() 115 | y_test = fit.transform(y_test).toarray() 116 | # dtypes 117 | x_train = x_train.astype(np.float32) 118 | x_test = x_test.astype(np.float32) 119 | y_train = y_train.astype(np.int32) 120 | y_test = y_test.astype(np.int32) 121 | return x_train, x_test, y_train, y_test 122 | 123 | def imdb_for_library(seq_len=100, max_features=20000, one_hot=False): 124 | ''' Replicates same pre-processing as: 125 | https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py 126 | 127 | I'm not sure if we want to load another version of IMDB that has got 128 | words, but if it does have words we would still convert to index in this 129 | backend script that is not meant for others to see ... 130 | 131 | But I'm worried this obfuscates the data a bit? 132 | ''' 133 | # 0 (padding), 1 (start), 2 (OOV) 134 | START_CHAR=1 135 | OOV_CHAR=2 136 | INDEX_FROM=3 137 | # Raw data (has been encoded into words already) 138 | x_train, x_test, y_train, y_test = download_imdb() 139 | # Combine for processing 140 | idx = len(x_train) 141 | _xs = np.concatenate([x_train, x_test]) 142 | # Words will start from INDEX_FROM (shift by 3) 143 | _xs = [[START_CHAR] + [w + INDEX_FROM for w in x] for x in _xs] 144 | # Max-features - replace words bigger than index with oov_char 145 | # E.g. if max_features = 5 then keep 0, 1, 2, 3, 4 i.e. 
words 3 and 4 146 | if max_features: 147 | print("Trimming to {} max-features".format(max_features)) 148 | _xs = [[w if (w < max_features) else OOV_CHAR for w in x] for x in _xs] 149 | # Pad to same sequences 150 | print("Padding to length {}".format(seq_len)) 151 | xs = np.zeros((len(_xs), seq_len), dtype=np.int) 152 | for o_idx, obs in enumerate(_xs): 153 | # Match keras pre-processing of taking last elements 154 | obs = obs[-seq_len:] 155 | for i_idx in range(len(obs)): 156 | if i_idx < seq_len: 157 | xs[o_idx][i_idx] = obs[i_idx] 158 | # One-hot 159 | if one_hot: 160 | y_train = np.expand_dims(y_train, axis=-1) 161 | y_test = np.expand_dims(y_test, axis=-1) 162 | enc = OneHotEncoder(categorical_features='all') 163 | fit = enc.fit(y_train) 164 | y_train = fit.transform(y_train).toarray() 165 | y_test = fit.transform(y_test).toarray() 166 | # dtypes 167 | x_train = np.array(xs[:idx]).astype(np.int32) 168 | x_test = np.array(xs[idx:]).astype(np.int32) 169 | y_train = y_train.astype(np.int32) 170 | y_test = y_test.astype(np.int32) 171 | return x_train, x_test, y_train, y_test 172 | -------------------------------------------------------------------------------- /docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_33_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_33_1.png -------------------------------------------------------------------------------- /docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_34_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_34_1.png -------------------------------------------------------------------------------- /docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_37_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_37_0.png -------------------------------------------------------------------------------- /docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_38_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_38_0.png -------------------------------------------------------------------------------- /docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_64_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_64_0.png -------------------------------------------------------------------------------- /docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_65_0.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/CNTK-Transfer-Cats-Dogs_files/CNTK-Transfer-Cats-Dogs_65_0.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_13_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_13_0.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_17_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_17_0.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_0.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_1.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_10.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_11.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_12.png -------------------------------------------------------------------------------- 
/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_13.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_14.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_15.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_16.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_17.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_18.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_19.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_2.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_20.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_21.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_21.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_22.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_22.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_23.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_23.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_24.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_24.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_25.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_25.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_26.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_26.png -------------------------------------------------------------------------------- 
/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_27.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_27.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_28.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_28.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_29.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_29.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_3.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_30.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_30.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_31.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_31.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_32.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_33.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_33.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_34.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_34.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_35.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_35.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_36.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_36.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_37.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_37.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_38.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_38.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_39.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_39.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_4.png -------------------------------------------------------------------------------- 
/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_40.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_40.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_5.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_6.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_7.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_8.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_22_9.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_1.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_10.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_10.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_11.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_12.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_13.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_14.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_15.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_16.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_17.png -------------------------------------------------------------------------------- 
/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_18.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_19.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_2.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_20.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_21.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_21.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_22.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_22.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_23.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_23.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_24.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_24.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_25.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_25.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_26.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_26.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_27.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_27.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_28.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_28.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_29.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_29.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_3.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_30.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_30.png -------------------------------------------------------------------------------- 
/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_31.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_31.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_32.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_33.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_33.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_34.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_34.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_35.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_35.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_36.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_36.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_37.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_37.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_38.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_38.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_39.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_39.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_4.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_40.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_40.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_41.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_41.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_5.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_6.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_7.png -------------------------------------------------------------------------------- 
/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_8.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_24_9.png -------------------------------------------------------------------------------- /docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_7_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Network-Visualization-TensorFlow_files/Network-Visualization-TensorFlow_7_0.png -------------------------------------------------------------------------------- /docs/Synthesizing-Celebs-BEGAN_files/Synthesizing-Celebs-BEGAN_29_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/Synthesizing-Celebs-BEGAN_files/Synthesizing-Celebs-BEGAN_29_0.png -------------------------------------------------------------------------------- /docs/azure-deep-learning.epub: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/azure-deep-learning.epub -------------------------------------------------------------------------------- /docs/azure-deep-learning.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/azure-deep-learning.pdf -------------------------------------------------------------------------------- /docs/how-to-make-a-racist-ai-without-really-trying_files/how-to-make-a-racist-ai-without-really-trying_50_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/how-to-make-a-racist-ai-without-really-trying_files/how-to-make-a-racist-ai-without-really-trying_50_1.png -------------------------------------------------------------------------------- /docs/how-to-make-a-racist-ai-without-really-trying_files/how-to-make-a-racist-ai-without-really-trying_52_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/how-to-make-a-racist-ai-without-really-trying_files/how-to-make-a-racist-ai-without-really-trying_52_0.png -------------------------------------------------------------------------------- 
/docs/how-to-make-a-racist-ai-without-really-trying_files/how-to-make-a-racist-ai-without-really-trying_61_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/how-to-make-a-racist-ai-without-really-trying_files/how-to-make-a-racist-ai-without-really-trying_61_1.png -------------------------------------------------------------------------------- /docs/how-to-make-a-racist-ai-without-really-trying_files/how-to-make-a-racist-ai-without-really-trying_64_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/how-to-make-a-racist-ai-without-really-trying_files/how-to-make-a-racist-ai-without-really-trying_64_1.png -------------------------------------------------------------------------------- /docs/imgs/azlogin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/imgs/azlogin.png -------------------------------------------------------------------------------- /docs/libs/gitbook-2.6.7/css/fontawesome/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/libs/gitbook-2.6.7/css/fontawesome/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/libs/gitbook-2.6.7/css/plugin-bookdown.css: -------------------------------------------------------------------------------- 1 | .book .book-header h1 { 2 | padding-left: 20px; 3 | padding-right: 20px; 4 | } 5 | .book .book-header.fixed { 6 | position: fixed; 7 | right: 0; 8 | top: 0; 9 | left: 0; 10 | border-bottom: 1px solid rgba(0,0,0,.07); 11 | } 12 | span.search-highlight { 13 | background-color: #ffff88; 14 | } 15 | @media (min-width: 600px) { 16 | .book.with-summary .book-header.fixed { 17 | left: 300px; 18 | } 19 | } 20 | @media (max-width: 1240px) { 21 | .book .book-body.fixed { 22 | top: 50px; 23 | } 24 | .book .book-body.fixed .body-inner { 25 | top: auto; 26 | } 27 | } 28 | @media (max-width: 600px) { 29 | .book.with-summary .book-header.fixed { 30 | left: calc(100% - 60px); 31 | min-width: 300px; 32 | } 33 | .book.with-summary .book-body { 34 | transform: none; 35 | left: calc(100% - 60px); 36 | min-width: 300px; 37 | } 38 | .book .book-body.fixed { 39 | top: 0; 40 | } 41 | } 42 | 43 | .book .book-body.fixed .body-inner { 44 | top: 50px; 45 | } 46 | .book .book-body .page-wrapper .page-inner section.normal sub, .book .book-body .page-wrapper .page-inner section.normal sup { 47 | font-size: 85%; 48 | } 49 | 50 | @media print { 51 | .book .book-summary, .book .book-body .book-header, .fa { 52 | display: none !important; 53 | } 54 | .book .book-body.fixed { 55 | left: 0px; 56 | } 57 | .book .book-body,.book .book-body .body-inner, .book.with-summary { 58 | overflow: visible !important; 59 | } 60 | } 61 | .kable_wrapper { 62 | border-spacing: 20px 0; 63 | border-collapse: separate; 64 | border: none; 65 | margin: auto; 66 | } 67 | .kable_wrapper > tbody > tr > td { 68 | vertical-align: top; 69 | } 70 | .book .book-body .page-wrapper 
.page-inner section.normal table tr.header { 71 | border-top-width: 2px; 72 | } 73 | .book .book-body .page-wrapper .page-inner section.normal table tr:last-child td { 74 | border-bottom-width: 2px; 75 | } 76 | .book .book-body .page-wrapper .page-inner section.normal table td, .book .book-body .page-wrapper .page-inner section.normal table th { 77 | border-left: none; 78 | border-right: none; 79 | } 80 | .book .book-body .page-wrapper .page-inner section.normal table.kable_wrapper > tbody > tr, .book .book-body .page-wrapper .page-inner section.normal table.kable_wrapper > tbody > tr > td { 81 | border-top: none; 82 | } 83 | .book .book-body .page-wrapper .page-inner section.normal table.kable_wrapper > tbody > tr:last-child > td { 84 | border-bottom: none; 85 | } 86 | 87 | div.theorem, div.lemma, div.corollary, div.proposition { 88 | font-style: italic; 89 | } 90 | span.theorem, span.lemma, span.corollary, span.proposition { 91 | font-style: normal; 92 | } 93 | div.proof:after { 94 | content: "\25a2"; 95 | float: right; 96 | } 97 | .header-section-number { 98 | padding-right: .5em; 99 | } 100 | -------------------------------------------------------------------------------- /docs/libs/gitbook-2.6.7/css/plugin-search.css: -------------------------------------------------------------------------------- 1 | .book .book-summary .book-search { 2 | padding: 6px; 3 | background: transparent; 4 | position: absolute; 5 | top: -50px; 6 | left: 0px; 7 | right: 0px; 8 | transition: top 0.5s ease; 9 | } 10 | .book .book-summary .book-search input, 11 | .book .book-summary .book-search input:focus, 12 | .book .book-summary .book-search input:hover { 13 | width: 100%; 14 | background: transparent; 15 | border: 1px solid #ccc; 16 | box-shadow: none; 17 | outline: none; 18 | line-height: 22px; 19 | padding: 7px 4px; 20 | color: inherit; 21 | box-sizing: border-box; 22 | } 23 | .book.with-search .book-summary .book-search { 24 | top: 0px; 25 | } 26 | .book.with-search .book-summary ul.summary { 27 | top: 50px; 28 | } 29 | -------------------------------------------------------------------------------- /docs/libs/gitbook-2.6.7/js/jquery.highlight.js: -------------------------------------------------------------------------------- 1 | gitbook.require(["jQuery"], function(jQuery) { 2 | 3 | /* 4 | * jQuery Highlight plugin 5 | * 6 | * Based on highlight v3 by Johann Burkard 7 | * http://johannburkard.de/blog/programming/javascript/highlight-javascript-text-higlighting-jquery-plugin.html 8 | * 9 | * Code a little bit refactored and cleaned (in my humble opinion). 10 | * Most important changes: 11 | * - has an option to highlight only entire words (wordsOnly - false by default), 12 | * - has an option to be case sensitive (caseSensitive - false by default) 13 | * - highlight element tag and class names can be specified in options 14 | * 15 | * Copyright (c) 2009 Bartek Szopka 16 | * 17 | * Licensed under MIT license. 
18 | * 19 | */ 20 | 21 | jQuery.extend({ 22 | highlight: function (node, re, nodeName, className) { 23 | if (node.nodeType === 3) { 24 | var match = node.data.match(re); 25 | if (match) { 26 | var highlight = document.createElement(nodeName || 'span'); 27 | highlight.className = className || 'highlight'; 28 | var wordNode = node.splitText(match.index); 29 | wordNode.splitText(match[0].length); 30 | var wordClone = wordNode.cloneNode(true); 31 | highlight.appendChild(wordClone); 32 | wordNode.parentNode.replaceChild(highlight, wordNode); 33 | return 1; //skip added node in parent 34 | } 35 | } else if ((node.nodeType === 1 && node.childNodes) && // only element nodes that have children 36 | !/(script|style)/i.test(node.tagName) && // ignore script and style nodes 37 | !(node.tagName === nodeName.toUpperCase() && node.className === className)) { // skip if already highlighted 38 | for (var i = 0; i < node.childNodes.length; i++) { 39 | i += jQuery.highlight(node.childNodes[i], re, nodeName, className); 40 | } 41 | } 42 | return 0; 43 | } 44 | }); 45 | 46 | jQuery.fn.unhighlight = function (options) { 47 | var settings = { className: 'highlight', element: 'span' }; 48 | jQuery.extend(settings, options); 49 | 50 | return this.find(settings.element + "." + settings.className).each(function () { 51 | var parent = this.parentNode; 52 | parent.replaceChild(this.firstChild, this); 53 | parent.normalize(); 54 | }).end(); 55 | }; 56 | 57 | jQuery.fn.highlight = function (words, options) { 58 | var settings = { className: 'highlight', element: 'span', caseSensitive: false, wordsOnly: false }; 59 | jQuery.extend(settings, options); 60 | 61 | if (words.constructor === String) { 62 | words = [words]; 63 | } 64 | words = jQuery.grep(words, function(word, i){ 65 | return word !== ''; 66 | }); 67 | words = jQuery.map(words, function(word, i) { 68 | return word.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, "\\$&"); 69 | }); 70 | if (words.length === 0) { return this; } 71 | 72 | var flag = settings.caseSensitive ?
"" : "i"; 73 | var pattern = "(" + words.join("|") + ")"; 74 | if (settings.wordsOnly) { 75 | pattern = "\\b" + pattern + "\\b"; 76 | } 77 | var re = new RegExp(pattern, flag); 78 | 79 | return this.each(function () { 80 | jQuery.highlight(this, re, settings.element, settings.className); 81 | }); 82 | }; 83 | 84 | }); 85 | -------------------------------------------------------------------------------- /docs/libs/gitbook-2.6.7/js/plugin-bookdown.js: -------------------------------------------------------------------------------- 1 | gitbook.require(["gitbook", "lodash", "jQuery"], function(gitbook, _, $) { 2 | 3 | var gs = gitbook.storage; 4 | 5 | gitbook.events.bind("start", function(e, config) { 6 | 7 | // add the Edit button (edit on Github) 8 | var edit = config.edit; 9 | if (edit && edit.link) gitbook.toolbar.createButton({ 10 | icon: 'fa fa-edit', 11 | label: edit.text || 'Edit', 12 | position: 'left', 13 | onClick: function(e) { 14 | e.preventDefault(); 15 | window.open(edit.link); 16 | } 17 | }); 18 | 19 | var down = config.download; 20 | var normalizeDownload = function() { 21 | if (!down || !(down instanceof Array) || down.length === 0) return; 22 | if (down[0] instanceof Array) return down; 23 | return $.map(down, function(file, i) { 24 | return [[file, file.replace(/.*[.]/g, '').toUpperCase()]]; 25 | }); 26 | }; 27 | down = normalizeDownload(down); 28 | if (down) if (down.length === 1 && /[.]pdf$/.test(down[0][0])) { 29 | gitbook.toolbar.createButton({ 30 | icon: 'fa fa-file-pdf-o', 31 | label: down[0][1], 32 | position: 'left', 33 | onClick: function(e) { 34 | e.preventDefault(); 35 | window.open(down[0][0]); 36 | } 37 | }); 38 | } else { 39 | gitbook.toolbar.createButton({ 40 | icon: 'fa fa-download', 41 | label: 'Download', 42 | position: 'left', 43 | dropdown: $.map(down, function(item, i) { 44 | return { 45 | text: item[1], 46 | onClick: function(e) { 47 | e.preventDefault(); 48 | window.open(item[0]); 49 | } 50 | }; 51 | }) 52 | }); 53 | } 54 | 55 | // highlight the current section in TOC 56 | var href = window.location.pathname; 57 | href = href.substr(href.lastIndexOf('/') + 1); 58 | if (href === '') href = 'index.html'; 59 | var li = $('a[href^="' + href + location.hash + '"]').parent('li.chapter').first(); 60 | var summary = $('ul.summary'), chaps = summary.find('li.chapter'); 61 | if (li.length === 0) li = chaps.first(); 62 | li.addClass('active'); 63 | chaps.on('click', function(e) { 64 | chaps.removeClass('active'); 65 | $(this).addClass('active'); 66 | gs.set('tocScrollTop', summary.scrollTop()); 67 | }); 68 | 69 | var toc = config.toc; 70 | // collapse TOC items that are not for the current chapter 71 | if (toc && toc.collapse) (function() { 72 | var type = toc.collapse; 73 | if (type === 'none') return; 74 | if (type !== 'section' && type !== 'subsection') return; 75 | // sections under chapters 76 | var toc_sub = summary.children('li[data-level]').children('ul'); 77 | if (type === 'section') { 78 | toc_sub.hide() 79 | .parent().has(li).children('ul').show(); 80 | } else { 81 | toc_sub.children('li').children('ul').hide() 82 | .parent().has(li).children('ul').show(); 83 | } 84 | li.children('ul').show(); 85 | var toc_sub2 = toc_sub.children('li'); 86 | if (type === 'section') toc_sub2.children('ul').hide(); 87 | summary.children('li[data-level]').find('a') 88 | .on('click.bookdown', function(e) { 89 | if (href === $(this).attr('href').replace(/#.*/, '')) 90 | $(this).parent('li').children('ul').toggle(); 91 | }); 92 | })(); 93 | 94 | // add tooltips to the <a>'s
that are truncated 95 | $('a').each(function(i, el) { 96 | if (el.offsetWidth >= el.scrollWidth) return; 97 | if (typeof el.title === 'undefined') return; 98 | el.title = el.text; 99 | }); 100 | 101 | // restore TOC scroll position 102 | var pos = gs.get('tocScrollTop'); 103 | if (typeof pos !== 'undefined') summary.scrollTop(pos); 104 | 105 | // highlight the TOC item that has same text as the heading in view as scrolling 106 | if (toc && toc.scroll_highlight !== false) (function() { 107 | // scroll the current TOC item into viewport 108 | var ht = $(window).height(), rect = li[0].getBoundingClientRect(); 109 | if (rect.top >= ht || rect.top <= 0 || rect.bottom <= 0) { 110 | summary.scrollTop(li[0].offsetTop); 111 | } 112 | // current chapter TOC items 113 | var items = $('a[href^="' + href + '"]').parent('li.chapter'), 114 | m = items.length; 115 | if (m === 0) { 116 | items = summary.find('li.chapter'); 117 | m = items.length; 118 | } 119 | if (m === 0) return; 120 | // all section titles on current page 121 | var hs = bookInner.find('.page-inner').find('h1,h2,h3'), n = hs.length, 122 | ts = hs.map(function(i, el) { return $(el).text(); }); 123 | if (n === 0) return; 124 | var scrollHandler = function(e) { 125 | var ht = $(window).height(); 126 | clearTimeout($.data(this, 'scrollTimer')); 127 | $.data(this, 'scrollTimer', setTimeout(function() { 128 | // find the first visible title in the viewport 129 | for (var i = 0; i < n; i++) { 130 | var rect = hs[i].getBoundingClientRect(); 131 | if (rect.top >= 0 && rect.bottom <= ht) break; 132 | } 133 | if (i === n) return; 134 | items.removeClass('active'); 135 | for (var j = 0; j < m; j++) { 136 | if (items.eq(j).children('a').first().text() === ts[i]) break; 137 | } 138 | if (j === m) j = 0; // highlight the chapter title 139 | // search bottom-up for a visible TOC item to highlight; if an item is 140 | // hidden, we check if its parent is visible, and so on 141 | while (j > 0 && items.eq(j).is(':hidden')) j--; 142 | items.eq(j).addClass('active'); 143 | }, 250)); 144 | }; 145 | bookInner.on('scroll.bookdown', scrollHandler); 146 | bookBody.on('scroll.bookdown', scrollHandler); 147 | })(); 148 | 149 | // do not refresh the page if the TOC item points to the current page 150 | $('a[href="' + href + '"]').parent('li.chapter').children('a') 151 | .on('click', function(e) { 152 | bookInner.scrollTop(0); 153 | bookBody.scrollTop(0); 154 | return false; 155 | }); 156 | 157 | var toolbar = config.toolbar; 158 | if (!toolbar || toolbar.position !== 'static') { 159 | var bookHeader = $('.book-header'); 160 | bookBody.addClass('fixed'); 161 | bookHeader.addClass('fixed') 162 | .css('background-color', bookBody.css('background-color')) 163 | .on('click.bookdown', function(e) { 164 | // the theme may have changed after user clicks the theme button 165 | bookHeader.css('background-color', bookBody.css('background-color')); 166 | }); 167 | } 168 | 169 | }); 170 | 171 | gitbook.events.bind("page.change", function(e) { 172 | // store TOC scroll position 173 | var summary = $('ul.summary'); 174 | gs.set('tocScrollTop', summary.scrollTop()); 175 | }); 176 | 177 | var bookBody = $('.book-body'), bookInner = bookBody.find('.body-inner'); 178 | var chapterTitle = function() { 179 | return bookInner.find('.page-inner').find('h1,h2').first().text(); 180 | }; 181 | var bookTitle = function() { 182 | return bookInner.find('.book-header > h1').first().text(); 183 | }; 184 | var saveScrollPos = function(e) { 185 | // save scroll position before page is reloaded 186 | 
gs.set('bodyScrollTop', { 187 | body: bookBody.scrollTop(), 188 | inner: bookInner.scrollTop(), 189 | focused: document.hasFocus(), 190 | title: chapterTitle() 191 | }); 192 | }; 193 | $(document).on('servr:reload', saveScrollPos); 194 | 195 | // check if the page is loaded in an iframe (e.g. the RStudio preview window) 196 | var inIFrame = function() { 197 | var inIframe = true; 198 | try { inIframe = window.self !== window.top; } catch (e) {} 199 | return inIframe; 200 | }; 201 | $(window).on('blur unload', function(e) { 202 | if (inIFrame()) saveScrollPos(e); 203 | gs.set('bookTitle', bookTitle()); 204 | }); 205 | 206 | $(function(e) { 207 | if (gs.get('bookTitle', '') !== bookTitle()) localStorage.clear(); 208 | var pos = gs.get('bodyScrollTop'); 209 | if (pos) { 210 | if (pos.title === chapterTitle()) { 211 | if (pos.body !== 0) bookBody.scrollTop(pos.body); 212 | if (pos.inner !== 0) bookInner.scrollTop(pos.inner); 213 | } 214 | if (pos.focused) bookInner.find('.page-wrapper').focus(); 215 | } 216 | // clear book body scroll position 217 | gs.remove('bodyScrollTop'); 218 | }); 219 | 220 | }); 221 | -------------------------------------------------------------------------------- /docs/libs/gitbook-2.6.7/js/plugin-fontsettings.js: -------------------------------------------------------------------------------- 1 | gitbook.require(["gitbook", "lodash", "jQuery"], function(gitbook, _, $) { 2 | var fontState; 3 | 4 | var THEMES = { 5 | "white": 0, 6 | "sepia": 1, 7 | "night": 2 8 | }; 9 | 10 | var FAMILY = { 11 | "serif": 0, 12 | "sans": 1 13 | }; 14 | 15 | // Save current font settings 16 | function saveFontSettings() { 17 | gitbook.storage.set("fontState", fontState); 18 | update(); 19 | } 20 | 21 | // Increase font size 22 | function enlargeFontSize(e) { 23 | e.preventDefault(); 24 | if (fontState.size >= 4) return; 25 | 26 | fontState.size++; 27 | saveFontSettings(); 28 | }; 29 | 30 | // Decrease font size 31 | function reduceFontSize(e) { 32 | e.preventDefault(); 33 | if (fontState.size <= 0) return; 34 | 35 | fontState.size--; 36 | saveFontSettings(); 37 | }; 38 | 39 | // Change font family 40 | function changeFontFamily(index, e) { 41 | e.preventDefault(); 42 | 43 | fontState.family = index; 44 | saveFontSettings(); 45 | }; 46 | 47 | // Change type of color 48 | function changeColorTheme(index, e) { 49 | e.preventDefault(); 50 | 51 | var $book = $(".book"); 52 | 53 | if (fontState.theme !== 0) 54 | $book.removeClass("color-theme-"+fontState.theme); 55 | 56 | fontState.theme = index; 57 | if (fontState.theme !== 0) 58 | $book.addClass("color-theme-"+fontState.theme); 59 | 60 | saveFontSettings(); 61 | }; 62 | 63 | function update() { 64 | var $book = gitbook.state.$book; 65 | 66 | $(".font-settings .font-family-list li").removeClass("active"); 67 | $(".font-settings .font-family-list li:nth-child("+(fontState.family+1)+")").addClass("active"); 68 | 69 | $book[0].className = $book[0].className.replace(/\bfont-\S+/g, ''); 70 | $book.addClass("font-size-"+fontState.size); 71 | $book.addClass("font-family-"+fontState.family); 72 | 73 | if(fontState.theme !== 0) { 74 | $book[0].className = $book[0].className.replace(/\bcolor-theme-\S+/g, ''); 75 | $book.addClass("color-theme-"+fontState.theme); 76 | } 77 | }; 78 | 79 | function init(config) { 80 | var $bookBody, $book; 81 | 82 | //Find DOM elements. 
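// (note: fontState, instantiated below, is read back from gitbook.storage on
// every load; the config values only act as first-visit defaults: size 2,
// sans family, white theme)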
83 | $book = gitbook.state.$book; 84 | $bookBody = $book.find(".book-body"); 85 | 86 | // Instantiate font state object 87 | fontState = gitbook.storage.get("fontState", { 88 | size: config.size || 2, 89 | family: FAMILY[config.family || "sans"], 90 | theme: THEMES[config.theme || "white"] 91 | }); 92 | 93 | update(); 94 | }; 95 | 96 | 97 | gitbook.events.bind("start", function(e, config) { 98 | var opts = config.fontsettings; 99 | 100 | // Create buttons in toolbar 101 | gitbook.toolbar.createButton({ 102 | icon: 'fa fa-font', 103 | label: 'Font Settings', 104 | className: 'font-settings', 105 | dropdown: [ 106 | [ 107 | { 108 | text: 'A', 109 | className: 'font-reduce', 110 | onClick: reduceFontSize 111 | }, 112 | { 113 | text: 'A', 114 | className: 'font-enlarge', 115 | onClick: enlargeFontSize 116 | } 117 | ], 118 | [ 119 | { 120 | text: 'Serif', 121 | onClick: _.partial(changeFontFamily, 0) 122 | }, 123 | { 124 | text: 'Sans', 125 | onClick: _.partial(changeFontFamily, 1) 126 | } 127 | ], 128 | [ 129 | { 130 | text: 'White', 131 | onClick: _.partial(changeColorTheme, 0) 132 | }, 133 | { 134 | text: 'Sepia', 135 | onClick: _.partial(changeColorTheme, 1) 136 | }, 137 | { 138 | text: 'Night', 139 | onClick: _.partial(changeColorTheme, 2) 140 | } 141 | ] 142 | ] 143 | }); 144 | 145 | 146 | // Init current settings 147 | init(opts); 148 | }); 149 | }); 150 | 151 | 152 | -------------------------------------------------------------------------------- /docs/libs/gitbook-2.6.7/js/plugin-search.js: -------------------------------------------------------------------------------- 1 | gitbook.require(["gitbook", "lodash", "jQuery"], function(gitbook, _, $) { 2 | var index = null; 3 | var $searchInput, $searchForm; 4 | var $highlighted, hi = 0, hiOpts = { className: 'search-highlight' }; 5 | var collapse = false; 6 | 7 | // Use a specific index 8 | function loadIndex(data) { 9 | // [Yihui] In bookdown, I use a character matrix to store the chapter 10 | // content, and the index is dynamically built on the client side. 11 | // Gitbook prebuilds the index data instead: https://github.com/GitbookIO/plugin-search 12 | // We can certainly do that via R packages V8 and jsonlite, but let's 13 | // see how slow it really is before improving it. On the other hand, 14 | // lunr cannot handle non-English text very well, e.g. the default 15 | // tokenizer cannot deal with Chinese text, so we may want to replace 16 | // lunr with a dumb simple text matching approach. 17 | index = lunr(function () { 18 | this.ref('url'); 19 | this.field('title', { boost: 10 }); 20 | this.field('body'); 21 | }); 22 | data.map(function(item) { 23 | index.add({ 24 | url: item[0], 25 | title: item[1], 26 | body: item[2] 27 | }); 28 | }); 29 | } 30 | 31 | // Fetch the search index 32 | function fetchIndex() { 33 | return $.getJSON(gitbook.state.basePath+"/search_index.json") 34 | .then(loadIndex); // [Yihui] we need to use this object later 35 | } 36 | 37 | // Search for a term and return results 38 | function search(q) { 39 | if (!index) return; 40 | 41 | var results = _.chain(index.search(q)) 42 | .map(function(result) { 43 | var parts = result.ref.split("#"); 44 | return { 45 | path: parts[0], 46 | hash: parts[1] 47 | }; 48 | }) 49 | .value(); 50 | 51 | // [Yihui] Highlight the search keyword on current page 52 | hi = 0; 53 | $highlighted = results.length === 0 ? 
undefined : $('.page-inner') 54 | .unhighlight(hiOpts).highlight(q, hiOpts).find('span.search-highlight'); 55 | scrollToHighlighted(); 56 | toggleTOC(results.length > 0); 57 | 58 | return results; 59 | } 60 | 61 | // [Yihui] Scroll the chapter body to the i-th highlighted string 62 | function scrollToHighlighted() { 63 | if (!$highlighted) return; 64 | var n = $highlighted.length; 65 | if (n === 0) return; 66 | var $p = $highlighted.eq(hi), p = $p[0], rect = p.getBoundingClientRect(); 67 | if (rect.top < 0 || rect.bottom > $(window).height()) { 68 | ($(window).width() >= 1240 ? $('.body-inner') : $('.book-body')) 69 | .scrollTop(p.offsetTop - 100); 70 | } 71 | $highlighted.css('background-color', ''); 72 | // an orange background color on the current item and removed later 73 | $p.css('background-color', 'orange'); 74 | setTimeout(function() { 75 | $p.css('background-color', ''); 76 | }, 2000); 77 | } 78 | 79 | // [Yihui] Expand/collapse TOC 80 | function toggleTOC(show) { 81 | if (!collapse) return; 82 | var toc_sub = $('ul.summary').children('li[data-level]').children('ul'); 83 | if (show) return toc_sub.show(); 84 | var href = window.location.pathname; 85 | href = href.substr(href.lastIndexOf('/') + 1); 86 | if (href === '') href = 'index.html'; 87 | var li = $('a[href^="' + href + location.hash + '"]').parent('li.chapter').first(); 88 | toc_sub.hide().parent().has(li).children('ul').show(); 89 | li.children('ul').show(); 90 | } 91 | 92 | // Create search form 93 | function createForm(value) { 94 | if ($searchForm) $searchForm.remove(); 95 | if ($searchInput) $searchInput.remove(); 96 | 97 | $searchForm = $('<form>
', { 98 | 'class': 'book-search', 99 | 'role': 'search' 100 | }); 101 | 102 | $searchInput = $('<input>', { 103 | 'type': 'search', 104 | 'class': 'form-control', 105 | 'val': value, 106 | 'placeholder': 'Type to search' 107 | }); 108 | 109 | $searchInput.appendTo($searchForm); 110 | $searchForm.prependTo(gitbook.state.$book.find('.book-summary')); 111 | } 112 | 113 | // Return true if search is open 114 | function isSearchOpen() { 115 | return gitbook.state.$book.hasClass("with-search"); 116 | } 117 | 118 | // Toggle the search 119 | function toggleSearch(_state) { 120 | if (isSearchOpen() === _state) return; 121 | if (!$searchInput) return; 122 | 123 | gitbook.state.$book.toggleClass("with-search", _state); 124 | 125 | // If search bar is open: focus input 126 | if (isSearchOpen()) { 127 | gitbook.sidebar.toggle(true); 128 | $searchInput.focus(); 129 | } else { 130 | $searchInput.blur(); 131 | $searchInput.val(""); 132 | gitbook.storage.remove("keyword"); 133 | gitbook.sidebar.filter(null); 134 | $('.page-inner').unhighlight(hiOpts); 135 | toggleTOC(false); 136 | } 137 | } 138 | 139 | // Recover current search when page changed 140 | function recoverSearch() { 141 | var keyword = gitbook.storage.get("keyword", ""); 142 | 143 | createForm(keyword); 144 | 145 | if (keyword.length > 0) { 146 | if(!isSearchOpen()) { 147 | toggleSearch(true); // [Yihui] open the search box 148 | } 149 | gitbook.sidebar.filter(_.pluck(search(keyword), "path")); 150 | } 151 | } 152 | 153 | 154 | gitbook.events.bind("start", function(e, config) { 155 | // [Yihui] disable search 156 | if (config.search === false) return; 157 | collapse = !config.toc || config.toc.collapse === 'section' || 158 | config.toc.collapse === 'subsection'; 159 | 160 | // Pre-fetch search index and create the form 161 | fetchIndex() 162 | // [Yihui] recover search after the page is loaded 163 | .then(recoverSearch); 164 | 165 | 166 | // Type in search bar 167 | $(document).on("keyup", ".book-search input", function(e) { 168 | var key = (e.keyCode ?
e.keyCode : e.which); 169 | // [Yihui] Escape -> close search box; Up/Down: previous/next highlighted 170 | if (key == 27) { 171 | e.preventDefault(); 172 | toggleSearch(false); 173 | } else if (key == 38) { 174 | if (hi <= 0 && $highlighted) hi = $highlighted.length; 175 | hi--; 176 | scrollToHighlighted(); 177 | } else if (key == 40) { 178 | hi++; 179 | if ($highlighted && hi >= $highlighted.length) hi = 0; 180 | scrollToHighlighted(); 181 | } 182 | }).on("input", ".book-search input", function(e) { 183 | var q = $(this).val().trim(); 184 | if (q.length === 0) { 185 | gitbook.sidebar.filter(null); 186 | gitbook.storage.remove("keyword"); 187 | $('.page-inner').unhighlight(hiOpts); 188 | toggleTOC(false); 189 | } else { 190 | var results = search(q); 191 | gitbook.sidebar.filter( 192 | _.pluck(results, "path") 193 | ); 194 | gitbook.storage.set("keyword", q); 195 | } 196 | }); 197 | 198 | // Create the toggle search button 199 | gitbook.toolbar.createButton({ 200 | icon: 'fa fa-search', 201 | label: 'Search', 202 | position: 'left', 203 | onClick: toggleSearch 204 | }); 205 | 206 | // Bind keyboard to toggle search 207 | gitbook.keyboard.bind(['f'], toggleSearch); 208 | }); 209 | 210 | // [Yihui] do not try to recover search; always start fresh 211 | // gitbook.events.bind("page.change", recoverSearch); 212 | }); 213 | -------------------------------------------------------------------------------- /docs/libs/gitbook-2.6.7/js/plugin-sharing.js: -------------------------------------------------------------------------------- 1 | gitbook.require(["gitbook", "lodash", "jQuery"], function(gitbook, _, $) { 2 | var SITES = { 3 | 'github': { 4 | 'label': 'Github', 5 | 'icon': 'fa fa-github', 6 | 'onClick': function(e) { 7 | e.preventDefault(); 8 | var repo = $('meta[name="github-repo"]').attr('content'); 9 | if (typeof repo === 'undefined') throw("Github repo not defined"); 10 | window.open("https://github.com/"+repo); 11 | } 12 | }, 13 | 'facebook': { 14 | 'label': 'Facebook', 15 | 'icon': 'fa fa-facebook', 16 | 'onClick': function(e) { 17 | e.preventDefault(); 18 | window.open("http://www.facebook.com/sharer/sharer.php?s=100&p[url]="+encodeURIComponent(location.href)); 19 | } 20 | }, 21 | 'twitter': { 22 | 'label': 'Twitter', 23 | 'icon': 'fa fa-twitter', 24 | 'onClick': function(e) { 25 | e.preventDefault(); 26 | window.open("http://twitter.com/home?status="+encodeURIComponent(document.title+" "+location.href)); 27 | } 28 | }, 29 | 'google': { 30 | 'label': 'Google+', 31 | 'icon': 'fa fa-google-plus', 32 | 'onClick': function(e) { 33 | e.preventDefault(); 34 | window.open("https://plus.google.com/share?url="+encodeURIComponent(location.href)); 35 | } 36 | }, 37 | 'weibo': { 38 | 'label': 'Weibo', 39 | 'icon': 'fa fa-weibo', 40 | 'onClick': function(e) { 41 | e.preventDefault(); 42 | window.open("http://service.weibo.com/share/share.php?content=utf-8&url="+encodeURIComponent(location.href)+"&title="+encodeURIComponent(document.title)); 43 | } 44 | }, 45 | 'instapaper': { 46 | 'label': 'Instapaper', 47 | 'icon': 'fa fa-instapaper', 48 | 'onClick': function(e) { 49 | e.preventDefault(); 50 | window.open("http://www.instapaper.com/text?u="+encodeURIComponent(location.href)); 51 | } 52 | }, 53 | 'vk': { 54 | 'label': 'VK', 55 | 'icon': 'fa fa-vk', 56 | 'onClick': function(e) { 57 | e.preventDefault(); 58 | window.open("http://vkontakte.ru/share.php?url="+encodeURIComponent(location.href)); 59 | } 60 | } 61 | }; 62 | 63 | 64 | 65 | gitbook.events.bind("start", function(e, config) { 66 | var opts = 
config.sharing; 67 | if (!opts) return; 68 | 69 | // Create dropdown menu 70 | var menu = _.chain(opts.all) 71 | .map(function(id) { 72 | var site = SITES[id]; 73 | 74 | return { 75 | text: site.label, 76 | onClick: site.onClick 77 | }; 78 | }) 79 | .compact() 80 | .value(); 81 | 82 | // Create main button with dropdown 83 | if (menu.length > 0) { 84 | gitbook.toolbar.createButton({ 85 | icon: 'fa fa-share-alt', 86 | label: 'Share', 87 | position: 'right', 88 | dropdown: [menu] 89 | }); 90 | } 91 | 92 | // Direct actions to share 93 | _.each(SITES, function(site, sideId) { 94 | if (!opts[sideId]) return; 95 | 96 | gitbook.toolbar.createButton({ 97 | icon: site.icon, 98 | label: site.label, 99 | position: 'right', 100 | onClick: site.onClick 101 | }); 102 | }); 103 | }); 104 | }); 105 | -------------------------------------------------------------------------------- /docs/output_10_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_10_0.png -------------------------------------------------------------------------------- /docs/output_10_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_10_1.png -------------------------------------------------------------------------------- /docs/output_11_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_11_0.png -------------------------------------------------------------------------------- /docs/output_11_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_11_2.png -------------------------------------------------------------------------------- /docs/output_12_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_12_2.png -------------------------------------------------------------------------------- /docs/output_13_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_13_0.png -------------------------------------------------------------------------------- /docs/output_13_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_13_1.png -------------------------------------------------------------------------------- /docs/output_13_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_13_2.png -------------------------------------------------------------------------------- /docs/output_5_0.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_5_0.png -------------------------------------------------------------------------------- /docs/output_9_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_9_0.png -------------------------------------------------------------------------------- /docs/output_9_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/docs/output_9_1.png -------------------------------------------------------------------------------- /docs/style.css: -------------------------------------------------------------------------------- 1 | p.caption { 2 | color: #777; 3 | margin-top: 10px; 4 | } 5 | p code { 6 | white-space: inherit; 7 | } 8 | pre { 9 | word-break: normal; 10 | word-wrap: normal; 11 | } 12 | pre code { 13 | white-space: inherit; 14 | } 15 | -------------------------------------------------------------------------------- /license.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/learnAnalytics-DeepLearning-Azure/00df3cc0c98e8ed28bb33364138e748aa8364b5b/license.txt --------------------------------------------------------------------------------
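A note on the gitbook plugin files collected above: they all share one lifecycle. Each file wraps itself in gitbook.require, binds a handler to the "start" event, and from there creates toolbar buttons and persists state through gitbook.storage. The sketch below is a minimal illustration of that pattern, not a file from this repository; the "Top" button, its icon, and the "topClicks" storage key are invented for the example, while the gitbook.require, gitbook.events.bind, gitbook.toolbar.createButton, and gitbook.storage calls mirror the signatures used by the bundled plugins.

gitbook.require(["gitbook", "jQuery"], function(gitbook, $) {

  gitbook.events.bind("start", function(e, config) {

    // Add a hypothetical "back to top" toolbar button, in the same style as
    // the Edit/Download/Share buttons created by the plugins above.
    gitbook.toolbar.createButton({
      icon: 'fa fa-arrow-up',
      label: 'Top',
      position: 'right',
      onClick: function(e) {
        e.preventDefault();
        // .book-body and .body-inner are the two scroll containers the
        // bundled plugins scroll (see plugin-bookdown.js and plugin-search.js).
        $('.book-body').scrollTop(0);
        $('.body-inner').scrollTop(0);
        // Persist a click counter the same way plugin-fontsettings.js
        // persists fontState; storage.get takes a default for first use.
        var n = gitbook.storage.get("topClicks", 0);
        gitbook.storage.set("topClicks", n + 1);
      }
    });

  });

});

Like plugin-search.js and plugin-fontsettings.js, the sketch keeps all persistent state behind gitbook.storage, so it survives page reloads without touching localStorage directly.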