├── .gitattributes
├── README.md
├── lab-0
│   └── README.md
├── lab-1
│   ├── README.md
│   ├── databricks
│   │   └── README.md
│   ├── pycharm
│   │   └── README.md
│   └── visual-studio
│       └── README.md
├── lab-2
│   ├── README.md
│   ├── databricks
│   │   └── README.md
│   ├── pycharm
│   │   └── README.md
│   └── visual-studio
│       └── README.md
├── lab-3
│   ├── README.md
│   ├── databricks
│   │   └── README.md
│   ├── pycharm
│   │   └── README.md
│   └── visual-studio
│       └── README.md
├── lab-4
│   ├── README.md
│   ├── databricks
│   │   └── README.md
│   ├── pycharm
│   │   └── README.md
│   └── visual-studio
│       └── README.md
├── lab-5
│   ├── README.md
│   ├── databricks
│   │   └── README.md
│   ├── pycharm
│   │   └── README.md
│   └── visual-studio
│       └── README.md
├── lab-6
│   ├── README.md
│   ├── databricks
│   │   └── README.md
│   ├── pycharm
│   │   ├── README.md
│   │   └── images
│   │       ├── 01.png
│   │       ├── 02.png
│   │       ├── 03.png
│   │       ├── 04.png
│   │       └── 05.png
│   └── visual-studio
│       ├── README.md
│       └── images
│           ├── 01.png
│           ├── 02.png
│           ├── 03.png
│           ├── 04.png
│           └── 05.png
└── starter-artifacts
    ├── databricks
    │   └── Azure ML Labs.dbc
    ├── pycharm
    │   └── azure-ml-labs
    │       ├── .idea
    │       │   ├── azure-ml-labs.iml
    │       │   ├── misc.xml
    │       │   ├── modules.xml
    │       │   └── workspace.xml
    │       ├── 01-model-training
    │       │   ├── 01-model-training.py
    │       │   ├── data
    │       │   │   ├── UsedCars_Affordability.csv
    │       │   │   └── UsedCars_Clean.csv
    │       │   └── training
    │       │       ├── .amlignore
    │       │       ├── aml_config
    │       │       │   ├── conda_dependencies.yml
    │       │       │   ├── docker.runconfig
    │       │       │   ├── local.runconfig
    │       │       │   └── project.json
    │       │       └── train.py
    │       ├── 02-model-management
    │       │   ├── 02_model_management.py
    │       │   ├── data
    │       │   │   └── UsedCars_Affordability.csv
    │       │   └── training
    │       │       ├── .amlignore
    │       │       ├── aml_config
    │       │       │   ├── conda_dependencies.yml
    │       │       │   ├── docker.runconfig
    │       │       │   ├── local.runconfig
    │       │       │   └── project.json
    │       │       └── train.py
    │       ├── 03-model-deployment-score.py
    │       ├── 03-model-deployment
    │       │   ├── 03_model_deployment.py
    │       │   └── data
    │       │       └── UsedCars_Affordability.csv
    │       ├── 04-automl
    │       │   ├── 04_automl.py
    │       │   ├── data
    │       │   │   └── UsedCars_Affordability.csv
    │       │   └── outputs
    │       │       └── aml_config
    │       │           ├── conda_dependencies.yml
    │       │           └── local.runconfig
    │       ├── 05-deep-learning
    │       │   └── 05_deep_learning.py
    │       ├── 06-deploy-to-iot-edge
    │       │   └── 06_deploy_to_iot_edge.py
    │       ├── iot_score.py
    │       ├── model.pkl
    │       └── myenv.yml
    └── visual-studio
        ├── .vs
        │   └── azure-ml-labs
        │       └── v15
        │           └── .suo
        ├── 01-model-training
        │   ├── 01-model-training.pyproj
        │   ├── 01-model-training.pyproj.user
        │   ├── _01_model_training.py
        │   ├── data
        │   │   ├── UsedCars_Affordability.csv
        │   │   └── UsedCars_Clean.csv
        │   └── training
        │       ├── .amlignore
        │       ├── aml_config
        │       │   ├── conda_dependencies.yml
        │       │   ├── docker.runconfig
        │       │   ├── local.runconfig
        │       │   └── project.json
        │       └── train.py
        ├── 02-model-management
        │   ├── 02-model-management.pyproj
        │   ├── _02_model_management.py
        │   ├── data
        │   │   └── UsedCars_Affordability.csv
        │   ├── outputs
        │   │   ├── Experiment-01.pkl
        │   │   ├── Experiment-02-01.pkl
        │   │   ├── Experiment-02-02.pkl
        │   │   ├── Experiment-02-03.pkl
        │   │   ├── Experiment-02.pkl
        │   │   └── Experiment-03.pkl
        │   └── training
        │       ├── .amlignore
        │       ├── aml_config
        │       │   ├── conda_dependencies.yml
        │       │   ├── docker.runconfig
        │       │   ├── local.runconfig
        │       │   └── project.json
        │       └── train.py
        ├── 03-model-deployment
        │   ├── 03-model-deployment.pyproj
        │   ├── 03-model-deployment.pyproj.user
        │   ├── _03_model_deployment.py
        │   ├── azureml-models
        │   │   └── usedcarsmodel
        │   │       ├── 21
        │   │       │   └── outputs
        │   │       │       ├── scaler.pkl
        │   │       │       └── usedcarsmodel.pkl
        │   │       └── 28
        │   │           ├── outputs
        │   │           │   ├── scaler.pkl
        │   │           │   └── usedcarsmodel.pkl
        │   │           └── usedcarsmodel.tar.gz
        │   ├── data
        │   │   └── UsedCars_Affordability.csv
        │   ├── mydeployenv.yml
        │   ├── outputs
        │   │   ├── scaler.pkl
        │   │   └── usedcarsmodel.pkl
        │   └── score.py
        ├── 04-automl
        │   ├── 04-automl.pyproj
        │   ├── 04-automl.pyproj.user
        │   ├── _04_automl.py
        │   ├── automl.log
        │   ├── automl.log.1
        │   ├── data
        │   │   └── UsedCars_Affordability.csv
        │   ├── model.pkl
        │   └── outputs
        │       └── aml_config
        │           ├── conda_dependencies.yml
        │           └── local.runconfig
        ├── 05-deep-learning
        │   ├── 05-deep-learning.pyproj
        │   └── _05_deep_learning.py
        ├── 06-deploy-to-iot-edge
        │   ├── 06-deploy-to-iot-edge.pyproj
        │   ├── _06_deploy_to_iot_edge.py
        │   ├── iot_score.py
        │   ├── model.pkl
        │   └── myenv.yml
        └── azure-ml-labs.sln

/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Azure Machine Learning service Labs

This repo contains labs that show how to use the Azure Machine Learning service with the Python SDK. These labs will teach you how to perform the training locally within a Deep Learning Virtual Machine (DLVM) as well as scale out model training by using Azure Batch AI workspaces or Azure Databricks. Each lab provides instructions for you to perform it using the environment of your choice: Visual Studio (with the Visual Studio Tools for AI), PyCharm, or Azure Databricks.

The following labs are available:
- [Lab 0:](./lab-0/README.md) Setting up your environment. If a lab environment has not been provided for you, this lab provides the instructions to get started in your own Azure Subscription.
- [Lab 1:](./lab-1/README.md) Set up the Azure Machine Learning service from code and create a classical machine learning model that logs metrics collected during model training.
- [Lab 2:](./lab-2/README.md) Use the capabilities of the Azure Machine Learning service to collect model performance metrics and to capture model version, as well as query the experimentation run history to retrieve captured metrics.
- [Lab 3:](./lab-3/README.md) Deploy a trained model to containers using an Azure Container Instance and an Azure Kubernetes Service using Azure Machine Learning.
- [Lab 4:](./lab-4/README.md) Use the automated machine learning (Auto ML) capabilities within the Azure Machine Learning service to automatically train multiple models with varying algorithms and hyperparameters and then select the best performing model.
- [Lab 5:](./lab-5/README.md) Train deep learning models built with Keras and a TensorFlow backend that utilize GPUs with the Azure Machine Learning service.
- [Lab 6:](./lab-6/README.md) Deploy a trained model container to an IoT Edge device via the Azure Machine Learning service.

--------------------------------------------------------------------------------
/lab-0/README.md:
--------------------------------------------------------------------------------
# Lab 0: Setting up your environment

If a lab environment has not been provided for you, this lab provides the instructions to get started in your own Azure Subscription.

The following summarizes the lab requirements if you want to set up your own environment (for example, on your local machine). If this is your first time performing these labs, it is highly recommended that you follow the Quick Start instructions below rather than set up your own environment from scratch.

The labs have the following requirements:
- Azure subscription. You will need a valid and active Azure account to complete this Azure lab. If you do not have one, you can sign up for a [free trial](https://azure.microsoft.com/en-us/free/).
- One of the following environments:
  - Visual Studio 2017 and the Visual Studio Tools for AI
  - PyCharm
  - Azure Databricks Workspace
- For the deep learning lab, you will need a VM or cluster with GPU capabilities.

Depending on which environment you use, there are different requirements. These are summarized as follows:
- Visual Studio 2017 and PyCharm
  - A Python 3.x Anaconda environment named `azureml` with:
    - The latest version of the Azure Machine Learning Python SDK installed. Use `pip install --upgrade azureml-sdk[notebooks,automl] azureml-dataprep` to install the latest version.
    - The following pip installable packages:
      - numpy, pandas, scikit-learn, keras and tensorflow-gpu
  - For the deep learning lab, make sure you have your GPU drivers properly installed.
- Azure Databricks
  - An Azure Databricks Workspace
  - A two-node Azure Databricks cluster with the following Python libraries attached:
    - numpy, pandas, scikit-learn, keras and tensorflow-gpu

The following sections describe the setup process for each environment.

# Quickstart: Visual Studio 2017 and PyCharm
The quickest way to get going with the labs is to deploy the Deep Learning Virtual Machine (DLVM).

1. Follow these instructions for [creating a Deep Learning Virtual Machine](https://docs.microsoft.com/en-us/azure/machine-learning/data-science-virtual-machine/provision-deep-learning-dsvm). Be sure you do the following:
   - OS Type: Select Windows 2016.
   - Location: Choose a region that provides NC-series VMs, such as East US, East US 2, North Central US, South Central US and West US 2. Be sure to visit the [Azure Products by Region](https://azure.microsoft.com/regions/services/) website for the latest.
   - Virtual Machine size: NC6.
2. Once the VM is ready, download the remote desktop (RDP) file from the Overview blade of your VM in the Azure Portal and log in. If you are unfamiliar with this process, see [Connect to a VM](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/connect-logon).
3. Once you are connected to your DLVM, open the Start Menu and run `Anaconda Prompt`.
4. Activate the azureml conda environment by running `activate azureml`.
5. To ensure TensorFlow uses a GPU, you will need to uninstall and reinstall Keras and TensorFlow in a specific order. Run these commands in sequence:
   - `pip uninstall keras`
   - `pip uninstall tensorflow-gpu`
   - `pip uninstall tensorflow`
   - `pip install keras`
   - `pip install tensorflow-gpu==1.10.0`
6. Upgrade the installed version of the Azure Machine Learning SDK by running the following command:
   - `pip install --upgrade azureml-sdk[notebooks,automl] azureml-dataprep`
7. If you will be using Visual Studio for the labs, launch `Visual Studio 2017` from the Start menu and log in with your Microsoft Account. Allow Visual Studio a few moments to get ready. Once you see the Tools for AI Start Page displayed in Visual Studio, the setup is complete.
8. If you will be using PyCharm for the labs, launch `JetBrains PyCharm Community Edition` from the Start menu. On the Complete Installation dialog, leave `Do not import settings` selected, accept the license agreement and choose an option for Data Sharing. On the Customize PyCharm screen, select `Skip Remaining and Set Defaults`. Once you are at the Welcome to PyCharm new project dialog, the setup is complete.
9. Your Virtual Machine is now ready to support any of the labs using either the Visual Studio or PyCharm environments.

# Quickstart: Azure Databricks

1. Click the following button to open the ARM template in the Azure Portal:
   [Deploy Databricks from the ARM Template](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2FAzure%2Fazure-quickstart-templates%2Fmaster%2F101-databricks-workspace%2Fazuredeploy.json)
2. Provide the required values to create your Azure Databricks workspace:
   - Subscription: Choose the Azure Subscription in which to deploy the workspace.
   - Resource Group: Leave at Create new and provide a name for the new resource group.
   - Location: Select a location near you for deployment that supports both Azure Databricks AND provides NC-series GPU-enabled Virtual Machines. This currently includes East US, East US 2, North Central US, South Central US and West US 2. For the latest list, see [Azure services available by region](https://azure.microsoft.com/regions/services/).
   - Workspace Name: Provide a name for your workspace.
   - Pricing Tier: Ensure `premium` is selected.
3. Accept the terms and conditions.
4. Select Purchase.
5. The workspace creation takes a few minutes. During workspace creation, the portal displays the Submitting deployment for Azure Databricks tile on the right side. You may need to scroll right on your dashboard to see the tile. There is also a progress bar displayed near the top of the screen. You can watch either area for progress.

--------------------------------------------------------------------------------
/lab-1/README.md:
--------------------------------------------------------------------------------
This lab can be performed using multiple environments; choose the environment in which you want to perform the lab below:

- [Visual Studio](./visual-studio/README.md)
- [PyCharm](./pycharm/README.md)
- [Azure Databricks](./databricks/README.md)

--------------------------------------------------------------------------------
/lab-1/databricks/README.md:
--------------------------------------------------------------------------------
# Lab 1 - Training a Machine Learning Model using Azure Machine Learning service

In this lab you will set up the Azure Machine Learning service from code and create a classical machine learning model that logs metrics collected during model training.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/databricks`.

1. From the Azure Portal, navigate to your deployed Azure Databricks workspace and select Launch Workspace.
2. Within the Workspace, using the command bar on the left, select Workspace, Users and select your username (the entry with the house icon).
3. In the blade that appears, select the downwards pointing chevron next to your name, and select Import.
4. On the Import Notebooks dialog, select File.
5. Select browse and then navigate to `starter-artifacts/databricks` and select `azure-ml-labs.dbc`.
6. Select Import.
7. A folder named after the archive should appear. Select that folder.
8. Navigate into the folder `01 Model Training`.
9. The folder will contain one or more notebooks. These are the notebooks you will use in completing this lab. Start with the first notebook and follow the instructions.

--------------------------------------------------------------------------------
/lab-1/pycharm/README.md:
--------------------------------------------------------------------------------
# Lab 1 - Training a Machine Learning Model using Azure Machine Learning service

In this lab you will set up the Azure Machine Learning service from code and create a classical machine learning model that logs metrics collected during model training.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/pycharm`.

## Exercise 1 - Get oriented to the lab files

1. Within PyCharm, select Open Existing Project and navigate to the directory where you cloned the repo to open the project `azure-ml-labs`.
2. In the Project window, expand External Libraries. You should see one environment whose path points to your AzureML Anaconda environment. This Anaconda environment will be used when you execute code.
3. In the Project tool window expand the folder `01-model-training`.
4. Expand the `data` folder. This folder contains two CSV files. `UsedCars_Clean.csv` represents the unlabeled data and `UsedCars_Affordability.csv` contains the complete data set with labels (Affordable is 1 for affordable, 0 for not affordable).
5. Expand `training`. This folder contains train.py, which will be used later in the lab to train the model using a remote cluster provided by Azure Batch AI.
6. Open `01_model_training.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise.

## Exercise 2 - Train a simple model locally
1. Read thru and select the code starting with # Step 1 all the way down to but NOT including # Step 2. Use `Alt + Shift + Enter` to execute the selected code in the Python Console. Take a moment to look at the data loaded into the Pandas Dataframe - it contains data about used cars such as the price (in dollars), age (in months), KM (kilometers driven) and other attributes such as whether it has an automatic transmission, the number of doors, and the weight.
2. In Step 2, we are going to try to build a model that can answer the question "Can I afford a car that is X months old and has Y kilometers on it, given I have $12,000 to spend?". We will engineer the label for affordable. Select the code starting with # Step 2 all the way down to but NOT including # Step 3. Use `Alt + Shift + Enter` to execute the selected code in the Python Console.
3. We are going to train a Logistic Regression model locally. This type of model requires us to standardize the scale of our training features Age and KM, so we use the `StandardScaler` from Scikit-Learn to transform these features so that they have values centered with a mean around 0 (mostly between -2.96 and 1.29). Select Step 3 and execute the code. Observe the difference in min and max values between the un-scaled and scaled Dataframes.
4. Train the model by fitting a LogisticRegression against the scaled input features (X_scaled) and the labels (y). Select Step 4 and execute the code.
5. Try prediction - if you set the age to 60 months and km to 40,000, does the model predict you can afford the car? Execute Step 5 and find out.
6. Now, let's get a sense for how accurate the model is. Select and execute Step 6. What was your model's accuracy?
7. One thing that can affect the model's performance is how much of all the available labeled training data is used to train the model. In Step 7, you define a method that uses train_test_split from Scikit-Learn that will enable you to split the data using different percentages. Execute Step 7 to register this function.

## Exercise 3 - Use Azure Machine Learning to log performance metrics
In the steps that follow, you will train multiple models using different sizes of training data and observe the impact on performance (accuracy). Each time you create a new model, you are executing a Run in the terminology of the Azure Machine Learning service. In this case, you will create one Experiment and execute multiple Runs within it, each with different training percentages (and resultant varying accuracies).

1. Execute Step 8 to quickly verify you have the Azure Machine Learning SDK installed. If you get a version number back without error, you are ready to proceed.
2. All Azure Machine Learning entities are organized within a Workspace. You can create an AML Workspace in the Azure Portal, but as the code in Step 9 shows, you can also create a Workspace directly from code. Set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments. Execute Step 9. You will be prompted to log in to your Azure Subscription.
3. To begin capturing metrics, you must first create an Experiment and then call `start_logging()` on that Experiment. The return value of this call is a Run. This root run can have other child runs. When you are finished with an experiment run, use `complete()` to close out the root run. Execute Step 10 to train four different models using differing amounts of training data and log the results to Azure Machine Learning.
4. Now that you have captured history for various runs, you can review the runs. You could use the Azure Portal for this - go to the Azure Portal, find your Azure Machine Learning Workspace, select Experiments and select the UsedCars_Experiment. However, in this case we will use the AML SDK to query for the runs. Select and execute Step 11 to view the runs and their status.

## Exercise 4 - Train remotely using Azure Batch AI
Up until now, all of your training was executed locally on the same machine running PyCharm. Now you will execute the same logic targeting a remote Azure Batch AI cluster, which you will provision from code.

1. Read thru and then execute Step 12 in which you will create an Azure Batch AI cluster using code. Once your cluster is ready, you should see output similar to the following:
```
Creating a new compute target...
Creating
succeeded.....
BatchAI wait for completion finished
Minimum number of nodes requested have been provisioned
{'allocationState': 'steady', 'allocationStateTransitionTime': '2018-11-17T17:56:07.361000+00:00', 'creationTime': '2018-11-17T17:52:53.601000+00:00', 'currentNodeCount': 1, 'errors': None, 'nodeStateCounts': {'idleNodeCount': 0, 'leavingNodeCount': 0, 'preparingNodeCount': 1, 'runningNodeCount': 0, 'unusableNodeCount': 0}, 'provisioningState': 'succeeded', 'provisioningStateTransitionTime': '2018-11-17T17:53:59.653000+00:00', 'scaleSettings': {'manual': None, 'autoScale': {'maximumNodeCount': 3, 'minimumNodeCount': 1, 'initialNodeCount': 1}}, 'vmPriority': 'lowpriority', 'vmSize': 'STANDARD_DS11_V2'}
```
2. With your cluster ready, you need to upload the training data to the default Datastore for your AML Workspace (which uses Azure Storage). Execute Step 13 to upload the data folder.
3. Next, you will need to create a training script that is similar to the code you have executed locally to train the model. Open `training/train.py` and read thru it. You do not need to execute this script, as you will send it to Azure Batch AI for execution.
4. Return to `01_model_training.py`. You will create an estimator that describes the configuration of the job that will execute your model training script. Execute Step 14 to create this estimator.
5. As the last step, submit the job using the `submit()` method of the Experiment object. Execute Step 15 to remotely execute your training script. The output you should see will begin with the creation of a Docker container that contains your configured dependencies, followed by the execution of your training script.

--------------------------------------------------------------------------------
/lab-1/visual-studio/README.md:
--------------------------------------------------------------------------------
# Lab 1 - Training a Machine Learning Model using Azure Machine Learning service

In this lab you will set up the Azure Machine Learning service from code and create a classical machine learning model that logs metrics collected during model training.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/visual-studio`.

## Exercise 1 - Get oriented to the lab files

1. Navigate to the directory where you cloned the repo and then open the solution `azure-ml-labs.sln`.
2. With the solution open in Visual Studio, look at Solution Explorer and expand the project `01-model-training`.
3. Under that, expand Python Environments. You should see one environment called `AzureML (3.6, 64-bit)`. This Anaconda environment will be used when you execute code either by running the script using F5 or by executing Python code in the Python Interactive Window.
4. Expand the `data` folder. This folder contains two CSV files. `UsedCars_Clean.csv` represents the unlabeled data and `UsedCars_Affordability.csv` contains the complete data set with labels (Affordable is 1 for affordable, 0 for not affordable).
5. Expand `training`. This folder contains train.py, which will be used later in the lab to train the model using a remote cluster provided by Azure Batch AI.
6. Open `_01_model_training.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise.
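
Before stepping thru the file, it may help to see the overall shape of the local training flow that Exercise 2 below walks thru. The following is a simplified sketch rather than the lab file itself; the `Age`, `KM` and `Affordable` column names come from the dataset described above, but the exact code in `_01_model_training.py` may differ:

```python
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

# Load the labeled data set (Affordable is 1 for affordable, 0 for not affordable).
df = pd.read_csv('data/UsedCars_Affordability.csv')
X = df[['Age', 'KM']]
y = df['Affordable']

# Standardize the training features so they are centered on a mean of 0.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Fit a logistic regression model against the scaled features and the labels.
clf = LogisticRegression()
clf.fit(X_scaled, y)

# Score a single candidate car (age 60 months, 40,000 km) and check accuracy.
print(clf.predict(scaler.transform([[60, 40000]])))
print(clf.score(X_scaled, y))
```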

## Exercise 2 - Train a simple model locally
1. Read thru and select the code starting with # Step 1 all the way down to but NOT including # Step 2. Use `Control + Enter` to execute the selected code in the Python Interactive Window. Take a moment to look at the data loaded into the Pandas Dataframe - it contains data about used cars such as the price (in dollars), age (in months), KM (kilometers driven) and other attributes such as whether it has an automatic transmission, the number of doors, and the weight.
2. In Step 2, we are going to try to build a model that can answer the question "Can I afford a car that is X months old and has Y kilometers on it, given I have $12,000 to spend?". We will engineer the label for affordable. Select the code starting with # Step 2 all the way down to but NOT including # Step 3. Use `Control + Enter` to execute the selected code in the Python Interactive Window.
3. We are going to train a Logistic Regression model locally. This type of model requires us to standardize the scale of our training features Age and KM, so we use the `StandardScaler` from Scikit-Learn to transform these features so that they have values centered with a mean around 0 (mostly between -2.96 and 1.29). Select Step 3 and execute the code. Observe the difference in min and max values between the un-scaled and scaled Dataframes.
4. Train the model by fitting a LogisticRegression against the scaled input features (X_scaled) and the labels (y). Select Step 4 and execute the code.
5. Try prediction - if you set the age to 60 months and km to 40,000, does the model predict you can afford the car? Execute Step 5 and find out.
6. Now, let's get a sense for how accurate the model is. Select and execute Step 6. What was your model's accuracy?
7. One thing that can affect the model's performance is how much of all the available labeled training data is used to train the model. In Step 7, you define a method that uses train_test_split from Scikit-Learn that will enable you to split the data using different percentages. Execute Step 7 to register this function.

## Exercise 3 - Use Azure Machine Learning to log performance metrics
In the steps that follow, you will train multiple models using different sizes of training data and observe the impact on performance (accuracy). Each time you create a new model, you are executing a Run in the terminology of the Azure Machine Learning service. In this case, you will create one Experiment and execute multiple Runs within it, each with different training percentages (and resultant varying accuracies).

1. Execute Step 8 to quickly verify you have the Azure Machine Learning SDK installed. If you get a version number back without error, you are ready to proceed.
2. All Azure Machine Learning entities are organized within a Workspace. You can create an AML Workspace in the Azure Portal, but as the code in Step 9 shows, you can also create a Workspace directly from code. Set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments. Execute Step 9. You will be prompted to log in to your Azure Subscription.
3. To begin capturing metrics, you must first create an Experiment and then call `start_logging()` on that Experiment. The return value of this call is a Run. This root run can have other child runs. When you are finished with an experiment run, use `complete()` to close out the root run. Execute Step 10 to train four different models using differing amounts of training data and log the results to Azure Machine Learning.
4. Now that you have captured history for various runs, you can review the runs. You could use the Azure Portal for this - go to the Azure Portal, find your Azure Machine Learning Workspace, select Experiments and select the UsedCars_Experiment. However, in this case we will use the AML SDK to query for the runs. Select and execute Step 11 to view the runs and their status.

## Exercise 4 - Train remotely using Azure Batch AI
Up until now, all of your training was executed locally on the same machine running Visual Studio. Now you will execute the same logic targeting a remote Azure Batch AI cluster, which you will provision from code.

1. Read thru and then execute Step 12 in which you will create an Azure Batch AI cluster using code. Once your cluster is ready, you should see output similar to the following:
```
Creating a new compute target...
Creating
succeeded.....
BatchAI wait for completion finished
Minimum number of nodes requested have been provisioned
{'allocationState': 'steady', 'allocationStateTransitionTime': '2018-11-17T17:56:07.361000+00:00', 'creationTime': '2018-11-17T17:52:53.601000+00:00', 'currentNodeCount': 1, 'errors': None, 'nodeStateCounts': {'idleNodeCount': 0, 'leavingNodeCount': 0, 'preparingNodeCount': 1, 'runningNodeCount': 0, 'unusableNodeCount': 0}, 'provisioningState': 'succeeded', 'provisioningStateTransitionTime': '2018-11-17T17:53:59.653000+00:00', 'scaleSettings': {'manual': None, 'autoScale': {'maximumNodeCount': 3, 'minimumNodeCount': 1, 'initialNodeCount': 1}}, 'vmPriority': 'lowpriority', 'vmSize': 'STANDARD_DS11_V2'}
```
2. With your cluster ready, you need to upload the training data to the default Datastore for your AML Workspace (which uses Azure Storage). Execute Step 13 to upload the data folder.
3. Next, you will need to create a training script that is similar to the code you have executed locally to train the model. Open `training/train.py` and read thru it. You do not need to execute this script, as you will send it to Azure Batch AI for execution.
4. Return to `_01_model_training.py`. You will create an estimator that describes the configuration of the job that will execute your model training script. Execute Step 14 to create this estimator.
5. As the last step, submit the job using the `submit()` method of the Experiment object. Execute Step 15 to remotely execute your training script. The output you should see will begin with the creation of a Docker container that contains your configured dependencies, followed by the execution of your training script. A consolidated sketch of Steps 12 thru 15 follows this list.
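
The sketch below shows roughly how Steps 12 thru 15 fit together with the version of the SDK these labs target. It is illustrative rather than the lab's exact code: the cluster name, folder paths and package list are assumptions, and `ws` is the Workspace object created in Step 9.

```python
from azureml.core import Experiment
from azureml.core.compute import BatchAiCompute, ComputeTarget
from azureml.train.estimator import Estimator

# Step 12 (sketch): provision an auto-scaling Batch AI cluster from code.
compute_config = BatchAiCompute.provisioning_configuration(
    vm_size='STANDARD_DS11_V2',
    vm_priority='lowpriority',
    autoscale_enabled=True,
    cluster_min_nodes=1,
    cluster_max_nodes=3)
compute_target = ComputeTarget.create(ws, 'carscluster', compute_config)  # cluster name is illustrative
compute_target.wait_for_completion(show_output=True)

# Step 13 (sketch): upload the local data folder to the Workspace's default datastore.
ds = ws.get_default_datastore()
ds.upload(src_dir='./data', target_path='used_cars', overwrite=True)

# Step 14 (sketch): describe the job that will run training/train.py on the cluster.
estimator = Estimator(source_directory='./training',
                      compute_target=compute_target,
                      entry_script='train.py',
                      conda_packages=['scikit-learn', 'pandas'])

# Step 15 (sketch): submit the job to the Experiment and stream its output.
run = Experiment(ws, 'UsedCars_Experiment').submit(estimator)
run.wait_for_completion(show_output=True)
```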

--------------------------------------------------------------------------------
/lab-2/README.md:
--------------------------------------------------------------------------------
This lab can be performed using multiple environments; choose the environment in which you want to perform the lab below:

- [Visual Studio](./visual-studio/README.md)
- [PyCharm](./pycharm/README.md)
- [Azure Databricks](./databricks/README.md)

--------------------------------------------------------------------------------
/lab-2/databricks/README.md:
--------------------------------------------------------------------------------
# Lab 2 - Using Azure Machine Learning service Model Versioning and Run History

In this lab you will use the capabilities of the Azure Machine Learning service to collect model performance metrics and to capture model version, as well as query the experimentation run history to retrieve captured metrics.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/databricks`.

1. From the Azure Portal, navigate to your deployed Azure Databricks workspace and select Launch Workspace.
2. Within the Workspace, using the command bar on the left, select Workspace, Users and select your username (the entry with the house icon).
3. In the blade that appears, select the downwards pointing chevron next to your name, and select Import.
4. On the Import Notebooks dialog, select File.
5. Select browse and then navigate to `starter-artifacts/databricks` and select `azure-ml-labs.dbc`.
6. Select Import.
7. A folder named after the archive should appear. Select that folder.
8. Navigate into the folder `02 Model Management`.
9. The folder will contain one or more notebooks. These are the notebooks you will use in completing this lab. Start with the first notebook and follow the instructions.

--------------------------------------------------------------------------------
/lab-2/pycharm/README.md:
--------------------------------------------------------------------------------
# Lab 2 - Using Azure Machine Learning service Model Versioning and Run History

In this lab you will use the capabilities of the Azure Machine Learning service to collect model performance metrics and to capture model version, as well as query the experimentation run history to retrieve captured metrics.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/pycharm`.

## Exercise 1 - Get oriented to the lab files
1. Within PyCharm, select Open Existing Project and navigate to the directory where you cloned the repo to open the project `azure-ml-labs`.
2. In the Project window, expand External Libraries. You should see one environment whose path points to your AzureML Anaconda environment. This Anaconda environment will be used when you execute code.
3. In the Project tool window expand the folder `02-model-management`.
4. Expand the `data` folder. This folder contains the CSV file `UsedCars_Affordability.csv` which contains the complete data set with labels (Affordable is 1 for affordable, 0 for not affordable).
5. Expand `training`. This folder contains train.py, which will be used later in the lab to train the model using a remote cluster provided by Azure Batch AI.
6. Open `02_model_management.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise.

## Exercise 2 - Train a simple model locally
This lab builds upon the lessons learned in the previous lab, but is self-contained, so you can work thru this lab without having to run a previous lab. As such, Steps 1, 2 and 3 in the lab are not explored in detail, as their goal is to set up a few experiment runs, which was covered in detail in Lab 1.
1. Read thru and select the code starting with # Step 1 all the way down to but NOT including # Step 2. Use `Alt + Shift + Enter` to execute the selected code in the Python Console. Take a moment to look at the data loaded into the Pandas Dataframe - it contains data about used cars such as the price (in dollars), age (in months), KM (kilometers driven) and other attributes such as whether it has an automatic transmission, the number of doors, and the weight.
2. In Step 2, we will define a helper method that locally trains, evaluates and then registers the trained model with Azure Machine Learning. Select and execute Step #2.
3. In Step 3, we retrieve an existing Azure Machine Learning Workspace (or create a new one if desired). In this step, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments. With the Workspace retrieved, we will train 3 different models using different subsets of the training data. Select and execute Step #3.

## Exercise 3 - Use Azure Machine Learning to query for performance metrics
1. As was demonstrated in the previous lab, you can use the Workspace to get a list of Experiments. You can also query for a particular Experiment by name. With an Experiment in hand, you review all runs associated with that Experiment and retrieve the metrics associated with each run. Select and execute Step #4 to see this process. What was the accuracy of the only run for Experiment-02-03?

## Exercise 4 - Remotely train a model in Azure Batch AI
1. Remote model training was covered in the previous lab. Execute Step #5 to create or retrieve your Azure Batch AI cluster and then submit a model training job to it. Wait for the run to complete before proceeding to the next exercise.

## Exercise 5 - Retrieve metrics for the remote Run
1. You can easily retrieve the metrics for a Run executed remotely by using the `run` object returned by the call to `Experiment.submit`. Execute Step 6 to retrieve metrics for the run you just executed. What was the accuracy of the run?

--------------------------------------------------------------------------------
/lab-2/visual-studio/README.md:
--------------------------------------------------------------------------------
# Lab 2 - Using Azure Machine Learning service Model Versioning and Run History

In this lab you will use the capabilities of the Azure Machine Learning service to collect model performance metrics and to capture model version, as well as query the experimentation run history to retrieve captured metrics.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/visual-studio`.
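
The heart of this lab is the run-history query pattern you will execute in Exercises 3 and 5 below. A minimal sketch of that pattern follows; the experiment name is an illustrative assumption, and `ws` is the Workspace retrieved in Step 3:

```python
from azureml.core import Experiment

# Assumes `ws` is the Workspace retrieved in Step 3 of this lab.
exp = Experiment(workspace=ws, name='Experiment-02-03')  # name is illustrative

# Walk every run in the experiment and print its status and logged metrics.
for run in exp.get_runs():
    print(run.id, run.get_status(), run.get_metrics())
```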

## Exercise 1 - Get oriented to the lab files

1. Navigate to the directory where you cloned the repo and then open the solution `azure-ml-labs.sln`.
2. With the solution open in Visual Studio, look at Solution Explorer and expand the project `02-model-management`.
3. Under that, expand Python Environments. You should see one environment called `AzureML (3.6, 64-bit)`. This Anaconda environment will be used when you execute code either by running the script using F5 or by executing Python code in the Python Interactive Window.
4. Expand the `data` folder. This folder contains one CSV file. `UsedCars_Affordability.csv` contains the complete, cleaned data set with labels (Affordable is 1 for affordable, 0 for not affordable).
5. Expand `training`. This folder contains train.py, which will be used later in the lab to train the model using a remote cluster provided by Azure Batch AI.
6. Open `_02_model_management.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise.

## Exercise 2 - Train a simple model locally
This lab builds upon the lessons learned in the previous lab, but is self-contained, so you can work thru this lab without having to run a previous lab. As such, Steps 1, 2 and 3 in the lab are not explored in detail, as their goal is to set up a few experiment runs, which was covered in detail in Lab 1.
1. Read thru and select the code starting with # Step 1 all the way down to but NOT including # Step 2. Use `Control + Enter` to execute the selected code in the Python Interactive Window. Take a moment to look at the data loaded into the Pandas Dataframe - it contains data about used cars such as the price (in dollars), age (in months), KM (kilometers driven) and other attributes such as whether it has an automatic transmission, the number of doors, and the weight.
2. In Step 2, we will define a helper method that locally trains, evaluates and then registers the trained model with Azure Machine Learning. Select and execute Step #2.
3. In Step 3, we retrieve an existing Azure Machine Learning Workspace (or create a new one if desired). In this step, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments. With the Workspace retrieved, we will train 3 different models using different subsets of the training data. Select and execute Step #3.

## Exercise 3 - Use Azure Machine Learning to query for performance metrics
1. As was demonstrated in the previous lab, you can use the Workspace to get a list of Experiments. You can also query for a particular Experiment by name. With an Experiment in hand, you review all runs associated with that Experiment and retrieve the metrics associated with each run. Select and execute Step #4 to see this process. What was the accuracy of the only run for Experiment-02-03?

## Exercise 4 - Remotely train a model in Azure Batch AI
1. Remote model training was covered in the previous lab. Execute Step #5 to create or retrieve your Azure Batch AI cluster and then submit a model training job to it. Wait for the run to complete before proceeding to the next exercise.

## Exercise 5 - Retrieve metrics for the remote Run
1. You can easily retrieve the metrics for a Run executed remotely by using the `run` object returned by the call to `Experiment.submit`. Execute Step 6 to retrieve metrics for the run you just executed. What was the accuracy of the run?

--------------------------------------------------------------------------------
/lab-3/README.md:
--------------------------------------------------------------------------------
This lab can be performed using multiple environments; choose the environment in which you want to perform the lab below:

- [Visual Studio](./visual-studio/README.md)
- [PyCharm](./pycharm/README.md)
- [Azure Databricks](./databricks/README.md)

--------------------------------------------------------------------------------
/lab-3/databricks/README.md:
--------------------------------------------------------------------------------
# Lab 3 - Model Deployment using Azure Machine Learning service

In this lab you will deploy a trained model to containers using an Azure Container Instance and an Azure Kubernetes Service using the Azure Machine Learning SDK.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/databricks`.

1. From the Azure Portal, navigate to your deployed Azure Databricks workspace and select Launch Workspace.
2. Within the Workspace, using the command bar on the left, select Workspace, Users and select your username (the entry with the house icon).
3. In the blade that appears, select the downwards pointing chevron next to your name, and select Import.
4. On the Import Notebooks dialog, select File.
5. Select browse and then navigate to `starter-artifacts/databricks` and select `azure-ml-labs.dbc`.
6. Select Import.
7. A folder named after the archive should appear. Select that folder.
8. Navigate into the folder `03 Model Deployment`.
9. The folder will contain one or more notebooks. These are the notebooks you will use in completing this lab. Start with the first notebook and follow the instructions.

--------------------------------------------------------------------------------
/lab-3/pycharm/README.md:
--------------------------------------------------------------------------------
# Lab 3 - Model Deployment using Azure Machine Learning service

In this lab you will deploy a trained model to containers using an Azure Container Instance and an Azure Kubernetes Service using the Azure Machine Learning SDK.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/pycharm`.

## Exercise 1 - Get oriented to the lab files
1. Within PyCharm, select Open Existing Project and navigate to the directory where you cloned the repo to open the project `azure-ml-labs`.
2. In the Project window, expand External Libraries. You should see one environment whose path points to your AzureML Anaconda environment. This Anaconda environment will be used when you execute code.
3. In the Project tool window expand the folder `03-model-deployment`.
4. Expand the `data` folder. This folder contains the CSV file `UsedCars_Affordability.csv` which contains the complete data set with labels (Affordable is 1 for affordable, 0 for not affordable).
5. Open `03_model_deployment.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise.

## Exercise 2 - Train a simple model locally
This lab builds upon the lessons learned in the previous lab, but is self-contained, so you can work thru this lab without having to run a previous lab.
1. Read thru and select the code starting with # Step 1 all the way down to but NOT including # Step 2. Use `Alt + Shift + Enter` to execute the selected code in the Python Console. Take a moment to look at the data loaded into the Pandas Dataframe - it contains data about used cars such as the price (in dollars), age (in months), KM (kilometers driven) and other attributes such as whether it has an automatic transmission, the number of doors, and the weight. In the function `train_eval_register_model` observe how the trained model is saved to the ./outputs folder along with the scaler that will be needed to scale inputs used later when scoring. Observe that we use `Model.register` to upload all files in the ./outputs folder to Azure Machine Learning as the model files. These model files will be retrieved later when the model is deployed into a container and operationalized as a web service.
2. In Step 2, we retrieve or create the AML Workspace and then train one instance of the model that we will deploy. In this step, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments. Select and execute Step #2.

## Exercise 3 - Download a version of a model from Azure Machine Learning
1. Once a model is registered with Azure Machine Learning, we can download the model files to any client and use them for scoring. In Step 3, you download the model you just registered, load both the scaler and model files retrieved by deserializing them into objects and then use them to perform a single prediction. Select and execute Step 3.

## Exercise 4 - Create the container image configuration
1. When you deploy a model as a web service to either ACI or AKS, you are deploying a Docker container. The first steps towards deploying involve defining the contents of that container. In Step 4, you create a Conda Dependencies YAML file that describes what Python packages need to be installed in the container; in this case you specify scikit-learn, numpy and pandas. Select and execute Step 4.
2. With Azure Machine Learning, you have full control over the logic of the webservice, which includes how it loads your model, transforms web service inputs, uses the model for scoring and returns the result. From the Project window, open `03-model-deployment-score.py` and read thru the code that defines the webservice. You do not need to execute this code, as the file will be deployed in the contents of the container image you are about to create.
3. Return to `03_model_deployment.py`. To create a Container Image, you need three things: the scoring script file, the runtime configuration (defining whether Python or PySpark should be used) and the Conda Dependencies file. Calling `ContainerImage.image_configuration` will capture all of the container image configuration in a single object. Select and execute Step 5.

## Exercise 5 - Deploy the container image to ACI
1. With the Container Image configuration in hand, you are almost ready to deploy to ACI. The next step is to define the size of the VM that ACI will use to run your Container. Execute Step 6 to create this configuration.
2. To deploy the container that operationalizes your model as a webservice, you can use `Webservice.deploy_from_model`, which will use your registered model, automate the creation of a new Container Image, and run the created container in ACI. Execute Step 7 to deploy your webservice to ACI. This step will take 5-7 minutes to complete.
3. Once the webservice deployment completes, you can use the returned webservice object to invoke the webservice. Execute Step 8 to invoke your webservice deployed to ACI.

## Exercise 6 - Deploy the container image to AKS
1. Once you are familiar with the process for deploying a webservice to ACI, you will find the process for deploying to AKS to be similar, with one additional step that creates the AKS cluster first. Execute Step 9 to provision a small AKS cluster. This step will take about 15-20 minutes.
2. With your AKS cluster ready, now you can deploy your webservice. Once again, you need to provide a configuration for the size of resources allocated from the AKS cluster to run instances of your Container. Execute Step 10 to deploy your webservice. This step will take 5-7 minutes.
3. As before, you can use the webservice object returned by the deploy_from_model method to invoke your deployed webservice. Execute Step 11 to verify you can invoke the web service.

--------------------------------------------------------------------------------
/lab-3/visual-studio/README.md:
--------------------------------------------------------------------------------
# Lab 3 - Model Deployment using Azure Machine Learning service

In this lab you will deploy a trained model to containers using an Azure Container Instance and an Azure Kubernetes Service using the Azure Machine Learning SDK.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/visual-studio`.

## Exercise 1 - Get oriented to the lab files

1. Navigate to the directory where you cloned the repo and then open the solution `azure-ml-labs.sln`.
2. With the solution open in Visual Studio, look at Solution Explorer and expand the project `03-model-deployment`.
3. Under that, expand Python Environments. You should see one environment called `AzureML (3.6, 64-bit)`. This Anaconda environment will be used when you execute code either by running the script using F5 or by executing Python code in the Python Interactive Window.
4. Expand the `data` folder. This folder contains one CSV file. `UsedCars_Affordability.csv` contains the complete, cleaned data set with labels (Affordable is 1 for affordable, 0 for not affordable).
5. Open `_03_model_deployment.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise.

## Exercise 2 - Train a simple model locally
This lab builds upon the lessons learned in the previous lab, but is self-contained, so you can work thru this lab without having to run a previous lab.
1. Read thru and select the code starting with # Step 1 all the way down to but NOT including # Step 2. Use `Control + Enter` to execute the selected code in the Python Interactive Window. Take a moment to look at the data loaded into the Pandas Dataframe - it contains data about used cars such as the price (in dollars), age (in months), KM (kilometers driven) and other attributes such as whether it has an automatic transmission, the number of doors, and the weight. In the function `train_eval_register_model` observe how the trained model is saved to the ./outputs folder along with the scaler that will be needed to scale inputs used later when scoring. Observe that we use `Model.register` to upload all files in the ./outputs folder to Azure Machine Learning as the model files. These model files will be retrieved later when the model is deployed into a container and operationalized as a web service.
2. In Step 2, we retrieve or create the AML Workspace and then train one instance of the model that we will deploy. In this step, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments. Select and execute Step #2.

## Exercise 3 - Download a version of a model from Azure Machine Learning
1. Once a model is registered with Azure Machine Learning, we can download the model files to any client and use them for scoring. In Step 3, you download the model you just registered, load both the scaler and model files retrieved by deserializing them into objects and then use them to perform a single prediction. Select and execute Step 3.

## Exercise 4 - Create the container image configuration
1. When you deploy a model as a web service to either ACI or AKS, you are deploying a Docker container. The first steps towards deploying involve defining the contents of that container. In Step 4, you create a Conda Dependencies YAML file that describes what Python packages need to be installed in the container; in this case you specify scikit-learn, numpy and pandas. Select and execute Step 4.
2. With Azure Machine Learning, you have full control over the logic of the webservice, which includes how it loads your model, transforms web service inputs, uses the model for scoring and returns the result. From Solution Explorer, open `score.py` and read thru the code that defines the webservice. You do not need to execute this code, as the file will be deployed in the contents of the container image you are about to create.
3. Return to `_03_model_deployment.py`. To create a Container Image, you need three things: the scoring script file, the runtime configuration (defining whether Python or PySpark should be used) and the Conda Dependencies file. Calling `ContainerImage.image_configuration` will capture all of the container image configuration in a single object. Select and execute Step 5.

## Exercise 5 - Deploy the container image to ACI
1. With the Container Image configuration in hand, you are almost ready to deploy to ACI. The next step is to define the size of the VM that ACI will use to run your Container. Execute Step 6 to create this configuration.
2. To deploy the container that operationalizes your model as a webservice, you can use `Webservice.deploy_from_model`, which will use your registered model, automate the creation of a new Container Image, and run the created container in ACI. Execute Step 7 to deploy your webservice to ACI. This step will take 5-7 minutes to complete.
3. Once the webservice deployment completes, you can use the returned webservice object to invoke the webservice. Execute Step 8 to invoke your webservice deployed to ACI. A consolidated sketch of Steps 4 thru 8 follows this list.
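
The sketch below shows roughly how Steps 4 thru 8 fit together with the version of the SDK these labs target. It is illustrative rather than the lab's exact code: the service name is an assumption, and `ws` and `model` come from the earlier steps.

```python
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.image import ContainerImage
from azureml.core.webservice import AciWebservice, Webservice

# Step 4 (sketch): declare the Python packages the container needs.
conda_deps = CondaDependencies.create(conda_packages=['scikit-learn', 'numpy', 'pandas'])
with open('mydeployenv.yml', 'w') as f:
    f.write(conda_deps.serialize_to_string())

# Step 5 (sketch): capture the container image configuration in a single object.
image_config = ContainerImage.image_configuration(
    execution_script='score.py',
    runtime='python',
    conda_file='mydeployenv.yml')

# Steps 6 and 7 (sketch): size the ACI deployment, then deploy the registered model.
aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
service = Webservice.deploy_from_model(workspace=ws,
                                       name='usedcars-aci-svc',  # service name is illustrative
                                       models=[model],
                                       image_config=image_config,
                                       deployment_config=aci_config)
service.wait_for_deployment(show_output=True)

# Step 8 (sketch): invoke the deployed webservice with a JSON payload.
print(service.run(input_data='{"data": [[60, 40000]]}'))
```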

## Exercise 6 - Deploy the container image to AKS
1. Once you are familiar with the process for deploying a webservice to ACI, you will find the process for deploying to AKS to be similar, with one additional step that creates the AKS cluster first. Execute Step 9 to provision a small AKS cluster. This step will take about 15-20 minutes.
2. With your AKS cluster ready, now you can deploy your webservice. Once again, you need to provide a configuration for the size of resources allocated from the AKS cluster to run instances of your Container. Execute Step 10 to deploy your webservice. This step will take 5-7 minutes.
3. As before, you can use the webservice object returned by the deploy_from_model method to invoke your deployed webservice. Execute Step 11 to verify you can invoke the web service.

--------------------------------------------------------------------------------
/lab-4/README.md:
--------------------------------------------------------------------------------
This lab can be performed using multiple environments; choose the environment in which you want to perform the lab below:

- [Visual Studio](./visual-studio/README.md)
- [PyCharm](./pycharm/README.md)
- [Azure Databricks](./databricks/README.md)

--------------------------------------------------------------------------------
/lab-4/databricks/README.md:
--------------------------------------------------------------------------------
# Lab 4 - Model Training with AutoML

In this lab you will use the automated machine learning (Auto ML) capabilities within the Azure Machine Learning service to automatically train multiple models with varying algorithms and hyperparameters, select the best performing model and register that model.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/databricks`.

1. From the Azure Portal, navigate to your deployed Azure Databricks workspace and select Launch Workspace.
2. Within the Workspace, using the command bar on the left, select Workspace, Users and select your username (the entry with the house icon).
3. In the blade that appears, select the downwards pointing chevron next to your name, and select Import.
4. On the Import Notebooks dialog, select File.
5. Select browse and then navigate to `starter-artifacts/databricks` and select `azure-ml-labs.dbc`.
6. Select Import.
7. A folder named after the archive should appear. Select that folder.
8. Navigate into the folder `04 AutoML`.
9. The folder will contain one or more notebooks. These are the notebooks you will use in completing this lab. Start with the first notebook and follow the instructions.

--------------------------------------------------------------------------------
/lab-4/pycharm/README.md:
--------------------------------------------------------------------------------
# Lab 4 - Model Training with AutoML

In this lab you will use the automated machine learning (Auto ML) capabilities within the Azure Machine Learning service to automatically train multiple models with varying algorithms and hyperparameters, select the best performing model and register that model.

## Exercise 0 - Get the lab files
If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/pycharm`.
-------------------------------------------------------------------------------- /lab-4/README.md: -------------------------------------------------------------------------------- 1 | This lab can be performed using multiple environments; choose the environment in which you want to perform the lab below: 2 | 3 | - [Visual Studio](./visual-studio/README.md) 4 | - [PyCharm](./pycharm/README.md) 5 | - [Azure Databricks](./databricks/README.md) -------------------------------------------------------------------------------- /lab-4/databricks/README.md: -------------------------------------------------------------------------------- 1 | # Lab 4 - Model Training with AutoML 2 | 3 | In this lab you will use the automated machine learning (AutoML) capabilities within the Azure Machine Learning service to automatically train multiple models with varying algorithms and hyperparameters, select the best performing model and register that model. 4 | 5 | ## Exercise 0 - Get the lab files 6 | If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/databricks`. 7 | 8 | 1. From the Azure Portal, navigate to your deployed Azure Databricks workspace and select Launch Workspace. 9 | 2. Within the Workspace, using the command bar on the left, select Workspace, Users and select your username (the entry with the house icon). 10 | 3. In the blade that appears, select the downwards pointing chevron next to your name, and select Import. 11 | 4. On the Import Notebooks dialog, select File. 12 | 5. Select browse and then navigate to `starter-artifacts/databricks` and select `Azure ML Labs.dbc`. 13 | 6. Select Import. 14 | 7. A folder named after the archive should appear. Select that folder. 15 | 8. Navigate into the folder `04 AutoML`. 16 | 9. The folder will contain one or more notebooks. These are the notebooks you will use in completing this lab. Start with the first notebook and follow the instructions. 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /lab-4/pycharm/README.md: -------------------------------------------------------------------------------- 1 | # Lab 4 - Model Training with AutoML 2 | 3 | In this lab you will use the automated machine learning (AutoML) capabilities within the Azure Machine Learning service to automatically train multiple models with varying algorithms and hyperparameters, select the best performing model and register that model. 4 | 5 | ## Exercise 0 - Get the lab files 6 | If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/pycharm`. 7 | 8 | ## Exercise 1 - Get oriented to the lab files 9 | 1. Within PyCharm, select Open Existing Project and navigate to the directory where you cloned the repo to open the project `azure-ml-labs`. 10 | 2. In the Project window, expand External Libraries. You should see one environment called `` where the path points to your AzureML Anaconda environment. This Anaconda environment will be used when you execute code. 11 | 3. In the Project tool window expand the folder `04-automl`. 12 | 4. Expand the `data` folder. This folder contains the CSV file `UsedCars_Affordability.csv`, which contains the complete data set with labels (Affordable is 1 for affordable, 0 for not affordable). 13 | 5. Open `04_automl.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise. 14 | 15 | 16 | ## Exercise 2 - Train a model using AutoML 17 | This lab builds upon the lessons learned in the previous lab, but is self-contained, so you can work thru this lab without having to run the previous labs. 18 | 1. Begin with Step 1. In this step you are loading the data prepared in previous labs and acquiring (or creating) an instance of your Azure Machine Learning Workspace. In this step, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments. Read thru the code starting with # Step 1 all the way down to but NOT including # Step 2. Use `Alt + Shift + Enter` to execute the selected code in the Python Console window. 19 | 2. To train a model using AutoML you need only provide a configuration for AutoML that defines items such as the type of model (classification or regression), the performance metric to optimize, exit criteria in terms of max training time, iterations and desired performance, any algorithms that should not be used, and the path into which to output the results. This configuration is specified using the `AutoMLConfig` class, which is then used to drive the submission of an experiment via `experiment.submit`. When AutoML finishes the parent run, you can easily get the best performing run and model from the returned run object by using `run.get_output()` (a condensed sketch of this pattern appears at the end of this README). Select and execute Step 2 to define the helper function that wraps the AutoML job submission. 20 | 3. In Step 3, you invoke the AutoML job. Select and execute Step 3. 21 | 4. Try out the best model by using Step 4. 22 | 23 | ## Exercise 3 - Register an AutoML created model 24 | 1. You can register models created by AutoML with Azure Machine Learning just as you would any other model. Select and execute Step 5 to register this model. 25 | 26 | 27 | 28 |
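For reference, the configure-submit-retrieve pattern from Exercise 2 looks roughly like this. A minimal sketch; the experiment name, metric, iteration counts, and the `X_train`/`y_train` variables are illustrative assumptions, not the lab's exact values:

```python
from azureml.core.experiment import Experiment
from azureml.train.automl import AutoMLConfig

# Define the AutoML job: task type, metric to optimize, and exit criteria
automl_config = AutoMLConfig(task='classification',
                             primary_metric='AUC_weighted',   # illustrative choice
                             iterations=10,
                             iteration_timeout_minutes=5,
                             X=X_train,   # feature matrix prepared from the lab data
                             y=y_train,   # Affordable labels (1/0)
                             path='./outputs')

# Submit as an experiment; AutoML trains one child run per iteration
experiment = Experiment(ws, 'usedcars-automl')   # illustrative experiment name
run = experiment.submit(automl_config, show_output=True)

# Once the parent run completes, retrieve the best child run and fitted model
best_run, fitted_model = run.get_output()
print(fitted_model.predict(X_train[:5]))  # quick smoke test, as in Step 4
```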
-------------------------------------------------------------------------------- /lab-4/visual-studio/README.md: -------------------------------------------------------------------------------- 1 | # Lab 4 - Model Training with AutoML 2 | 3 | In this lab you will use the automated machine learning (AutoML) capabilities within the Azure Machine Learning service to automatically train multiple models with varying algorithms and hyperparameters, select the best performing model and register that model. 4 | 5 | ## Exercise 0 - Get the lab files 6 | If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/visual-studio`. 7 | 8 | ## Exercise 1 - Get oriented to the lab files 9 | 10 | 1. Navigate to the directory where you cloned the repo and then open the solution `azure-ml-labs.sln`. 11 | 2. With the solution open in Visual Studio, look at Solution Explorer and expand the project `04-automl`. 12 | 3. Under that, expand Python Environments. You should see one environment called `AzureML (3.6, 64-bit)`. This Anaconda environment will be used when you execute code, either by running the script using F5 or by executing Python code in the Python Interactive Window. 13 | 4. Expand the `data` folder. This folder contains one CSV file. `UsedCars_Affordability.csv` contains the complete, cleaned data set with labels (Affordable is 1 for affordable, 0 for not affordable). 14 | 5. Open `_04_automl.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise. 15 | 16 | 17 | ## Exercise 2 - Train a model using AutoML 18 | This lab builds upon the lessons learned in the previous lab, but is self-contained, so you can work thru this lab without having to run the previous labs. 19 | 1. Begin with Step 1. In this step you are loading the data prepared in previous labs and acquiring (or creating) an instance of your Azure Machine Learning Workspace. In this step, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments. Read thru the code starting with # Step 1 all the way down to but NOT including # Step 2. Use `Control + Enter` to execute the selected code in the Python Interactive Window. 20 | 2. To train a model using AutoML you need only provide a configuration for AutoML that defines items such as the type of model (classification or regression), the performance metric to optimize, exit criteria in terms of max training time, iterations and desired performance, any algorithms that should not be used, and the path into which to output the results. This configuration is specified using the `AutoMLConfig` class, which is then used to drive the submission of an experiment via `experiment.submit`. When AutoML finishes the parent run, you can easily get the best performing run and model from the returned run object by using `run.get_output()`. Select and execute Step 2 to define the helper function that wraps the AutoML job submission. 21 | 3. In Step 3, you invoke the AutoML job. Select and execute Step 3. 22 | 4. Try out the best model by using Step 4. 23 | 24 | ## Exercise 3 - Register an AutoML created model 25 | 1. You can register models created by AutoML with Azure Machine Learning just as you would any other model. Select and execute Step 5 to register this model. 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /lab-5/README.md: -------------------------------------------------------------------------------- 1 | This lab can be performed using multiple environments; choose the environment in which you want to perform the lab below: 2 | 3 | - [Visual Studio](./visual-studio/README.md) 4 | - [PyCharm](./pycharm/README.md) 5 | - [Azure Databricks](./databricks/README.md) -------------------------------------------------------------------------------- /lab-5/databricks/README.md: -------------------------------------------------------------------------------- 1 | # Lab 5 - Deep Learning 2 | 3 | In this lab you train deep learning models built with Keras and a TensorFlow backend that utilize GPUs with the Azure Machine Learning service.
4 | 5 | ## Exercise 0 - Get the lab files 6 | If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/databricks`. 7 | 8 | 1. From the Azure Portal, navigate to your deployed Azure Databricks workspace and select Launch Workspace. 9 | 2. Within the Workspace, using the command bar on the left, select Workspace, Users and select your username (the entry with the house icon). 10 | 3. In the blade that appears, select the downwards pointing chevron next to your name, and select Import. 11 | 4. On the Import Notebooks dialog, select File. 12 | 5. Select browse and then navigate to `starter-artifacts/databricks` and select `Azure ML Labs.dbc`. 13 | 6. Select Import. 14 | 7. A folder named after the archive should appear. Select that folder. 15 | 8. Navigate into the folder `05 Deep Learning`. 16 | 9. The folder will contain one or more notebooks. These are the notebooks you will use in completing this lab. Start with the first notebook and follow the instructions. 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /lab-5/pycharm/README.md: -------------------------------------------------------------------------------- 1 | # Lab 5 - Deep Learning 2 | 3 | In this lab you train deep learning models built with Keras and a TensorFlow backend that utilize GPUs with the Azure Machine Learning service. 4 | 5 | ## Exercise 0 - Get the lab files 6 | If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/pycharm`. 7 | 8 | ## Exercise 1 - Get oriented to the lab files 9 | 1. Within PyCharm, select Open Existing Project and navigate to the directory where you cloned the repo to open the project `azure-ml-labs`. 10 | 2. In the Project window, expand External Libraries. You should see one environment called `` where the path points to your AzureML Anaconda environment. This Anaconda environment will be used when you execute code. 11 | 3. In the Project tool window expand the folder `05-deep-learning`. 12 | 4. Open `05_deep_learning.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise. 13 | 14 | 15 | ## Exercise 2 - Train an autoencoder using GPU 16 | 1. Begin with Step 1 and read thru the code in Step 1. Here you will use Keras to define an autoencoder. Don't get hung up on the details of constructing the autoencoder; the point of this lab is to show you how to train neural networks using GPUs. Select Step 1 and type `Alt + Shift + Enter` to execute the selected code in the Python Console window. In the output, verify that `K.tensorflow_backend._get_available_gpus()` returned an entry describing a GPU available in your environment. 17 | 2. Once you have your autoencoder model structured, you need to train the underlying neural network. Training this model on regular CPUs would take hours; however, you can execute this same code in an environment with GPUs for better performance. Select and execute Step 2. How long did your training take? 18 | 3. With a trained autoencoder in hand, try using the model by selecting and executing Step 3 (a condensed sketch of the pattern follows this exercise). 19 |
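The lab's autoencoder is defined in `05_deep_learning.py`; the sketch below shows the general shape only, with illustrative layer sizes, using the same old-style Keras 2.x backend call named above for the GPU check:

```python
from keras import backend as K
from keras.layers import Dense, Input
from keras.models import Model

# Verify the TensorFlow backend can see a GPU before training
print(K.tensorflow_backend._get_available_gpus())

# A minimal autoencoder: compress inputs to a small code, then reconstruct them
input_dim = 784       # e.g., flattened 28x28 inputs -- illustrative
encoding_dim = 32
inputs = Input(shape=(input_dim,))
encoded = Dense(encoding_dim, activation='relu')(inputs)
decoded = Dense(input_dim, activation='sigmoid')(encoded)
autoencoder = Model(inputs, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

# Step 2 trains on the lab's data; x_train here is a placeholder
# autoencoder.fit(x_train, x_train, epochs=50, batch_size=256, shuffle=True)
```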
20 | ## Exercise 3 - Register the neural network model with Azure Machine Learning 21 | 1. In Step 4, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments to create or retrieve your workspace. Observe that you can register a neural network model with Azure Machine Learning in exactly the same way you would register a classical machine learning model. Execute Step 4 to register the model. 22 | 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /lab-5/visual-studio/README.md: -------------------------------------------------------------------------------- 1 | # Lab 5 - Deep Learning 2 | 3 | In this lab you train deep learning models built with Keras and a TensorFlow backend that utilize GPUs with the Azure Machine Learning service. 4 | 5 | ## Exercise 0 - Get the lab files 6 | If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/visual-studio`. 7 | 8 | ## Exercise 1 - Get oriented to the lab files 9 | 10 | 1. Navigate to the directory where you cloned the repo and then open the solution `azure-ml-labs.sln`. 11 | 2. With the solution open in Visual Studio, look at Solution Explorer and expand the project `05-deep-learning`. 12 | 3. Under that, expand Python Environments. You should see one environment called `AzureML (3.6, 64-bit)`. This Anaconda environment will be used when you execute code, either by running the script using F5 or by executing Python code in the Python Interactive Window. 13 | 4. Open `_05_deep_learning.py`. This is the Python file you will step thru executing in this lab. Leave it open and continue to the next exercise. 14 | 15 | 16 | ## Exercise 2 - Train an autoencoder using GPU 17 | 1. Begin with Step 1 and read thru the code in Step 1. Here you will use Keras to define an autoencoder. Don't get hung up on the details of constructing the autoencoder; the point of this lab is to show you how to train neural networks using GPUs. Select Step 1 and type `Control + Enter` to execute the selected code in the Python Interactive Window. In the output, verify that `K.tensorflow_backend._get_available_gpus()` returned an entry describing a GPU available in your environment. 18 | 2. Once you have your autoencoder model structured, you need to train the underlying neural network. Training this model on regular CPUs would take hours; however, you can execute this same code in an environment with GPUs for better performance. Select and execute Step 2. How long did your training take? 19 | 3. With a trained autoencoder in hand, try using the model by selecting and executing Step 3. 20 | 21 | ## Exercise 3 - Register the neural network model with Azure Machine Learning 22 | 1. In Step 4, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments to create or retrieve your workspace. Observe that you can register a neural network model with Azure Machine Learning in exactly the same way you would register a classical machine learning model. Execute Step 4 to register the model (a condensed sketch follows this exercise).
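Registering the network's serialized weights uses the same `Model.register` call as any classical model. A minimal sketch; the model file name and registered name are illustrative assumptions:

```python
from azureml.core import Workspace
from azureml.core.model import Model

# Retrieve (or create) the workspace using the values set per the comments
ws = Workspace.get(name=workspace_name,
                   subscription_id=subscription_id,
                   resource_group=resource_group)

# Register the serialized Keras model just like any other model file
registered_model = Model.register(workspace=ws,
                                  model_path='autoencoder.h5',   # illustrative path
                                  model_name='usedcars-autoencoder',
                                  description='Keras autoencoder trained on GPU')
print(registered_model.name, registered_model.version)
```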
23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /lab-6/README.md: -------------------------------------------------------------------------------- 1 | This lab can be performed using multiple environments; choose the environment in which you want to perform the lab below: 2 | 3 | - [Visual Studio](./visual-studio/README.md) 4 | - [PyCharm](./pycharm/README.md) 5 | - [Azure Databricks](./databricks/README.md) -------------------------------------------------------------------------------- /lab-6/databricks/README.md: -------------------------------------------------------------------------------- 1 | # Lab 6 - Model Deployment to IoT Edge 2 | 3 | In this lab you deploy a trained model container to an IoT Edge device. 4 | 5 | ## Exercise 0 - Get the lab files 6 | If you have not cloned this repository to your working environment, do so now. All of the artifacts for this lab are located under `starter-artifacts/databricks`. 7 | 8 | 1. From the Azure Portal, navigate to your deployed Azure Databricks workspace and select Launch Workspace. 9 | 2. Within the Workspace, using the command bar on the left, select Workspace, Users and select your username (the entry with the house icon). 10 | 3. In the blade that appears, select the downwards pointing chevron next to your name, and select Import. 11 | 4. On the Import Notebooks dialog, select File. 12 | 5. Select browse and then navigate to `starter-artifacts/databricks` and select `Azure ML Labs.dbc`. 13 | 6. Select Import. 14 | 7. A folder named after the archive should appear. Select that folder. 15 | 8. Navigate into the folder `06 Deploy to IoT Edge`. 16 | 9. The folder will contain one or more notebooks. These are the notebooks you will use in completing this lab. Start with the first notebook and follow the instructions.
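The starter artifacts for this lab include `iot_score.py`, `model.pkl`, and `myenv.yml`. Scoring scripts packaged into Azure Machine Learning containers generally expose an `init()`/`run()` pair along the lines of this minimal sketch (the registered model name and the JSON payload shape are assumptions, not the lab's exact contract):

```python
import json
import pickle

import numpy as np
from azureml.core.model import Model

def init():
    # Called once when the container starts: load the registered model from disk
    global model
    model_path = Model.get_model_path('usedcarsmodel')  # illustrative model name
    with open(model_path, 'rb') as f:
        model = pickle.load(f)

def run(raw_data):
    # Called per message: parse the payload, score it, and return JSON
    try:
        data = np.array(json.loads(raw_data)['data'])
        result = model.predict(data)
        return json.dumps({'result': result.tolist()})
    except Exception as e:
        return json.dumps({'error': str(e)})
```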
17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /lab-6/pycharm/images/01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/pycharm/images/01.png -------------------------------------------------------------------------------- /lab-6/pycharm/images/02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/pycharm/images/02.png -------------------------------------------------------------------------------- /lab-6/pycharm/images/03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/pycharm/images/03.png -------------------------------------------------------------------------------- /lab-6/pycharm/images/04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/pycharm/images/04.png -------------------------------------------------------------------------------- /lab-6/pycharm/images/05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/pycharm/images/05.png -------------------------------------------------------------------------------- /lab-6/visual-studio/images/01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/visual-studio/images/01.png -------------------------------------------------------------------------------- /lab-6/visual-studio/images/02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/visual-studio/images/02.png -------------------------------------------------------------------------------- /lab-6/visual-studio/images/03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/visual-studio/images/03.png -------------------------------------------------------------------------------- /lab-6/visual-studio/images/04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/visual-studio/images/04.png -------------------------------------------------------------------------------- /lab-6/visual-studio/images/05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/lab-6/visual-studio/images/05.png -------------------------------------------------------------------------------- /starter-artifacts/databricks/Azure ML Labs.dbc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/nthacker/AML-service-labs/0c5d2fc86ec67a6bd9b3a52c0bded3ec2985df04/starter-artifacts/databricks/Azure ML Labs.dbc -------------------------------------------------------------------------------- /starter-artifacts/pycharm/azure-ml-labs/.idea/azure-ml-labs.iml: -------------------------------------------------------------------------------- /starter-artifacts/pycharm/azure-ml-labs/.idea/misc.xml: -------------------------------------------------------------------------------- /starter-artifacts/pycharm/azure-ml-labs/.idea/modules.xml: -------------------------------------------------------------------------------- /starter-artifacts/pycharm/azure-ml-labs/.idea/workspace.xml: --------------------------------------------------------------------------------