├── .cloud └── .azure │ ├── aci_deploy.json │ ├── aks_deploy.json │ ├── deploy.json │ ├── docker.json │ └── test │ ├── test_aci_deploy.json │ ├── test_aks_deploy.json │ ├── test_fapp_deploy.json │ └── test_workspace.json ├── .github ├── CODEOWNERS └── workflows │ ├── integration.yml │ ├── integration_functionapp.yml │ ├── python.yml │ ├── synch_docs.yml │ └── versioning.yml ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── LICENSE ├── README.md ├── SECURITY.md ├── action.yml ├── code ├── entrypoint.sh ├── main.py ├── schemas.py └── utils.py └── tests ├── deploy ├── environment.yml └── score.py ├── test_main.py └── test_utils.py /.cloud/.azure/aci_deploy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "inference_source_directory": "", 4 | "inference_entry_script": "", 5 | "test_enabled": true, 6 | "test_file_path": "", 7 | "test_file_function_name": "", 8 | "conda_file": "", 9 | "extra_docker_file_steps": "", 10 | "enable_gpu": false, 11 | "cuda_version": "", 12 | "model_data_collection_enabled": true, 13 | "authentication_enabled": true, 14 | "app_insights_enabled": true, 15 | "runtime": "<'python' or 'spark-py'>", 16 | "custom_base_image": "", 17 | "cpu_cores": 0.1, 18 | "memory_gb": 0.5, 19 | "delete_service_after_deployment": false, 20 | "profiling_enabled": false, 21 | "profiling_dataset": "", 22 | "skip_deployment": false, 23 | "tags": {"": ""}, 24 | "properties": {"": ""}, 25 | "description": "", 26 | "location": "", 27 | "ssl_enabled": true, 28 | "ssl_cert_pem_file": "", 29 | "ssl_key_pem_file": "", 30 | "ssl_cname": "", 31 | "dns_name_label": "" 32 | } -------------------------------------------------------------------------------- /.cloud/.azure/aks_deploy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "deployment_compute_target": "", 4 | "inference_source_directory": "", 5 | "inference_entry_script": "", 6 | "test_enabled": true, 7 | 
"test_file_path": "", 8 | "test_file_function_name": "", 9 | "conda_file": "", 10 | "extra_docker_file_steps": "", 11 | "enable_gpu": false, 12 | "cuda_version": "", 13 | "model_data_collection_enabled": true, 14 | "authentication_enabled": true, 15 | "app_insights_enabled": true, 16 | "runtime": "<'python' or 'spark-py'>", 17 | "custom_base_image": "", 18 | "cpu_cores": 0.1, 19 | "memory_gb": 0.5, 20 | "gpu_cores": 0, 21 | "delete_service_after_deployment": false, 22 | "profiling_enabled": false, 23 | "profiling_dataset": "", 24 | "skip_deployment": false, 25 | "tags": {"": ""}, 26 | "properties": {"": ""}, 27 | "description": "", 28 | "autoscale_enabled": true, 29 | "autoscale_min_replicas": 1, 30 | "autoscale_max_replicas": 10, 31 | "autoscale_refresh_seconds": 1, 32 | "autoscale_target_utilization": 70, 33 | "scoring_timeout_ms": 60000, 34 | "replica_max_concurrent_requests": 1, 35 | "max_request_wait_time": 1000, 36 | "num_replicas": null, 37 | "period_seconds": 10, 38 | "initial_delay_seconds": 310, 39 | "timeout_seconds": 2, 40 | "success_threshold": 1, 41 | "failure_threshold": 3, 42 | "namespace": "", 43 | "token_auth_enabled": true 44 | } -------------------------------------------------------------------------------- /.cloud/.azure/deploy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "deployment_compute_target": "", // do not specify deployment compute target name for deployment on Azure Container Instances 4 | "inference_source_directory": "", 5 | "inference_entry_script": "", 6 | "conda_file": "", 7 | "extra_docker_file_steps": "", 8 | "test_enabled": true, 9 | "test_file_path": "", 10 | "test_file_function_name": "", 11 | "enable_gpu": false, 12 | "cuda_version": "", 13 | "model_data_collection_enabled": true, 14 | "authentication_enabled": true, 15 | "app_insights_enabled": true, 16 | "runtime": "<'python' or 'spark-py'>", 17 | "custom_base_image": "", 18 | "cpu_cores": 0.1, 19 | 
"memory_gb": 0.5, 20 | "delete_service_after_deployment": false, 21 | "profiling_enabled": false, 22 | "profiling_dataset": "", 23 | "skip_deployment": false, 24 | "create_image": "<'docker', 'function_blob', 'function_http' or 'function_service_bus_queue' >", 25 | "tags": {"": ""}, 26 | "properties": {"": ""}, 27 | "description": "", 28 | 29 | // aci specific parameters 30 | "location": "", 31 | "ssl_enabled": true, 32 | "ssl_cert_pem_file": "", 33 | "ssl_key_pem_file": "", 34 | "ssl_cname": "", 35 | "dns_name_label": "", 36 | 37 | // aks specific parameters 38 | "gpu_cores": 0, 39 | "autoscale_enabled": true, 40 | "autoscale_min_replicas": 1, 41 | "autoscale_max_replicas": 10, 42 | "autoscale_refresh_seconds": 1, 43 | "autoscale_target_utilization": 70, 44 | "scoring_timeout_ms": 60000, 45 | "replica_max_concurrent_requests": 1, 46 | "max_request_wait_time": 1000, 47 | "num_replicas": null, 48 | "period_seconds": 10, 49 | "initial_delay_seconds": 310, 50 | "timeout_seconds": 2, 51 | "success_threshold": 1, 52 | "failure_threshold": 3, 53 | "namespace": "", 54 | "token_auth_enabled": true 55 | } 56 | -------------------------------------------------------------------------------- /.cloud/.azure/docker.json: -------------------------------------------------------------------------------- 1 | { 2 | "inference_source_directory": "", 3 | "inference_entry_script": "", 4 | "conda_file": "", 5 | "extra_docker_file_steps": "", 6 | "runtime": "<'python' or 'spark-py'>", 7 | "enable_gpu": false, 8 | "cuda_version": "", 9 | "custom_base_image": "", 10 | "description": "", 11 | "skip_deployment": true, 12 | "create_image": "<'docker', 'function_blob', 'function_http' or 'function_service_bus_queue' >" 13 | } -------------------------------------------------------------------------------- /.cloud/.azure/test/test_aci_deploy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "TESTSERVICENAME", 3 | 
"inference_source_directory": "tests/deploy/", 4 | "inference_entry_script": "score.py", 5 | "conda_file": "environment.yml", 6 | "test_enabled": false, 7 | "delete_service_after_deployment": true 8 | } -------------------------------------------------------------------------------- /.cloud/.azure/test/test_aks_deploy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "TESTSERVICENAME", 3 | "deployment_compute_target": "aks-intTest", 4 | "inference_source_directory": "tests/deploy/", 5 | "inference_entry_script": "score.py", 6 | "conda_file": "environment.yml", 7 | "delete_service_after_deployment": true 8 | } -------------------------------------------------------------------------------- /.cloud/.azure/test/test_fapp_deploy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "testservicename", 3 | "inference_source_directory": "tests/deploy/", 4 | "inference_entry_script": "score.py", 5 | "conda_file": "environment.yml", 6 | "test_enabled": false, 7 | "delete_service_after_deployment": true, 8 | "skip_deployment": true, 9 | "create_image": "function_http" 10 | } -------------------------------------------------------------------------------- /.cloud/.azure/test/test_workspace.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gha_and_aml_workspace", 3 | "resource_group": "gha_and_aml_rg" 4 | } -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @marvinbuss -------------------------------------------------------------------------------- /.github/workflows/integration.yml: -------------------------------------------------------------------------------- 1 | name: Integration Test 2 | on: [push , pull_request] 3 | jobs: 4 | build: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - 
name: Check Out Repository 8 | id: checkout_repository 9 | uses: actions/checkout@v2 10 | 11 | # Connect to existing workspace 12 | - name: Create Workspace 13 | id: ws_create 14 | uses: Azure/aml-workspace@v1 15 | with: 16 | azure_credentials: ${{ secrets.AZURE_CREDENTIALS }} 17 | parameters_file: "test/test_workspace.json" 18 | 19 | - name: Update Service Name Bash for ACI 20 | run: | 21 | sed -i -e 's/TESTSERVICENAME/'aci$RANDOM'/g' .cloud/.azure/test/test_aci_deploy.json 22 | 23 | # Deploy model in Azure Machine Learning to ACI 24 | - name: Deploy model to ACI 25 | id: aml_deploy_aci 26 | uses: ./ 27 | with: 28 | azure_credentials: ${{ secrets.AZURE_CREDENTIALS }} 29 | model_name: mymodel 30 | model_version: 1 31 | parameters_file: "test/test_aci_deploy.json" 32 | 33 | - name: Update Service Name Bash for AKS 34 | run: | 35 | sed -i -e 's/TESTSERVICENAME/'aks$RANDOM'/g' .cloud/.azure/test/test_aks_deploy.json 36 | 37 | # Deploy model in Azure Machine Learning to AKS 38 | - name: Deploy model to AKS 39 | id: aml_deploy_aks 40 | uses: ./ 41 | with: 42 | azure_credentials: ${{ secrets.AZURE_CREDENTIALS }} 43 | model_name: mymodel 44 | model_version: 1 45 | parameters_file: "test/test_aks_deploy.json" 46 | -------------------------------------------------------------------------------- /.github/workflows/integration_functionapp.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Function App Integration Test 3 | on: [push , pull_request] 4 | jobs: 5 | build: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: Check Out Repository 9 | id: checkout_repository 10 | uses: actions/checkout@master 11 | 12 | # Connect to existing workspace 13 | - name: Create Workspace 14 | id: ws_create 15 | uses: Azure/aml-workspace@v1 16 | with: 17 | azure_credentials: ${{ secrets.AZURE_CREDENTIALS }} 18 | parameters_file: "test/test_workspace.json" 19 | 20 | # Deploy model in Azure Machine Learning to ACI 21 | - name: Deploy model 22 | id: 
aml_deploy 23 | uses: ./ 24 | with: 25 | azure_credentials: ${{ secrets.AZURE_CREDENTIALS }} 26 | model_name: mymodel 27 | model_version: 1 28 | parameters_file: "test/test_fapp_deploy.json" 29 | 30 | - name: 'Login via Azure CLI' 31 | uses: azure/login@v1 32 | with: 33 | creds: ${{ secrets.AZURE_CREDENTIALS }} 34 | 35 | - name: 'Run Azure Functions Container Action' 36 | uses: Azure/functions-container-action@v1 37 | id: fa 38 | with: 39 | app-name: amltest 40 | image: ${{ steps.aml_deploy.outputs.package_location }} 41 | 42 | - name: 'use the published functionapp url in upcoming steps' 43 | run: | 44 | echo "${{ steps.fa.outputs.app-url }}" 45 | -------------------------------------------------------------------------------- /.github/workflows/python.yml: -------------------------------------------------------------------------------- 1 | name: Lint and Test 2 | on: [push, pull_request] 3 | jobs: 4 | lint: 5 | name: Lint and Test 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: Set up Python 3.7 9 | id: python_setup 10 | uses: actions/setup-python@v2 11 | with: 12 | python-version: "3.7" 13 | 14 | - name: Check Out Repository 15 | id: checkout_repository 16 | uses: actions/checkout@v2 17 | 18 | - name: Lint 19 | id: python_linting 20 | run: | 21 | pip install flake8 22 | flake8 code/ --count --ignore=E501 --show-source --statistics 23 | flake8 tests/ --count --ignore=E501,E402 --show-source --statistics 24 | 25 | - name: Test 26 | id: python_test 27 | run: | 28 | pip install pytest jsonschema azureml-sdk azureml-contrib-functions 29 | pytest 30 | -------------------------------------------------------------------------------- /.github/workflows/synch_docs.yml: -------------------------------------------------------------------------------- 1 | name: Synch Documentation 2 | on: 3 | push: 4 | branches: 5 | - master 6 | paths: 7 | - 'README.md' 8 | jobs: 9 | synch: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Check Out Repository 13 | id: 
checkout_repository 14 | uses: actions/checkout@v2 15 | with: 16 | path: "aml-deploy" 17 | 18 | - name: Check Out Docs Repository 19 | id: checkout_docs_repository 20 | uses: actions/checkout@v2 21 | with: 22 | repository: "machine-learning-apps/MLOps" 23 | path: "MLOps" 24 | 25 | - name: Copy file to docs repo and commit changes 26 | run: | 27 | cp aml-deploy/README.md MLOps/site/docs/aml-deploy.md 28 | cd MLOps 29 | git config --local --unset-all "http.https://github.com/.extraheader" 30 | git config --local user.email "action@github.com" 31 | git config --local user.name "GitHub AML Deploy Action" 32 | git add . 33 | git commit -m "Add changes of deploy readme" -a 34 | git push "https://${{ secrets.GH_TOKEN }}@github.com/machine-learning-apps/MLOps.git" HEAD:master --force 35 | continue-on-error: true 36 | -------------------------------------------------------------------------------- /.github/workflows/versioning.yml: -------------------------------------------------------------------------------- 1 | name: Keep the versions up-to-date 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | actions-tagger: 9 | runs-on: windows-latest 10 | steps: 11 | - uses: Actions-R-Us/actions-tagger@latest 12 | with: 13 | publish_latest_tag: true 14 | env: 15 | GITHUB_TOKEN: '${{secrets.GITHUB_TOKEN}}' 16 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/marvinbuss/aml-docker:1.34.0 2 | 3 | LABEL maintainer="azure/gh_aml" 4 | 5 | COPY /code /code 6 | ENTRYPOINT ["/code/entrypoint.sh"] 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Integration Test](https://github.com/Azure/aml-deploy/workflows/Integration%20Test/badge.svg?branch=master&event=push) 2 | ![Lint and Test](https://github.com/Azure/aml-deploy/workflows/Lint%20and%20Test/badge.svg?branch=master&event=push) 3 | 4 | # GitHub Action for deploying Machine Learning Models to Azure 5 | 6 | ## Deprecation notice 7 | 8 | This Action is deprecated. Instead, consider using the [CLI (v2)](https://docs.microsoft.com/azure/machine-learning/how-to-configure-cli) to manage and interact with Azure Machine Learning endpoints and deployments in GitHub Actions. 9 | 10 | **Important:** The CLI (v2) is not recommended for production use while in preview. 11 | 12 | ## Usage 13 | 14 | The Deploy Machine Learning Models to Azure action will deploy your model on [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning/) using GitHub Actions. 15 | 16 | Get started today with a [free Azure account](https://azure.com/free/open-source)! 17 | 18 | This repository contains GitHub Action for deploying Machine Learning Models to Azure Machine Learning and creates a real-time endpoint on the model to integrate models in other systems. The endpoint can be hosted either on an Azure Container Instance or on an Azure Kubernetes Service. 19 | 20 | 21 | This GitHub Action also allows you to provide a python script that executes tests against the Webservice endpoint after the model deployment has completed successfully. You can enable tests by setting the parameter `test_enabled` to true. 
In addition to that, you have to provide a python script (default `code/test/test.py`) which includes a function (default ` def main(webservice):`) that describes your tests that you want to execute against the service object. The python script gets the [webservice object](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py) injected. The action fails, if the test script fails. 22 | 23 | 24 | ## Dependencies on other GitHub Actions 25 | * [Checkout](https://github.com/actions/checkout) Checkout your Git repository content into GitHub Actions agent. 26 | * [aml-workspace](https://github.com/Azure/aml-workspace) This action requires an Azure Machine Learning workspace to be present. You can either create a new one or re-use an existing one using the action. 27 | * [aml-registermodel](https://github.com/Azure/aml-registermodel) Before deploying the model, you need to register the model with Azure Machine Learning. If not already registered, you can use this action and use its output in deploy action. 28 | * [aml-compute](https://github.com/Azure/aml-compute) You don't need this if you want to host your endpoint on an ACI instance. But, if you want to host your endpoint on an AKS cluster, you can manage the AKS Cluster via the action. 29 | 30 | 31 | 32 | ## Create Azure Machine Learning and deploy an machine learning model using GitHub Actions 33 | 34 | This action is one in a series of actions that can be used to setup an ML Ops process. **We suggest getting started with one of our template repositories**, which will allow you to create an ML Ops process in less than 5 minutes. 35 | 36 | 1. **Simple template repository: [ml-template-azure](https://github.com/machine-learning-apps/ml-template-azure)** 37 | 38 | Go to this template and follow the getting started guide to setup an ML Ops process within minutes and learn how to use the Azure Machine Learning GitHub Actions in combination. 
This template demonstrates a very simple process for training and deploying machine learning models. 39 | 40 | 2. **Advanced template repository: [mlops-enterprise-template](https://github.com/Azure-Samples/mlops-enterprise-template)** 41 | 42 | This template demonstrates how approval processes can be included in the process and how training and deployment workflows can be splitted. It also shows how workflows (e.g. deployment) can be triggered from pull requests. More enhancements will be added to this template in the future to make it more enterprise ready. 43 | 44 | ### Example workflow 45 | 46 | ```yaml 47 | name: My Workflow 48 | on: [push, pull_request] 49 | jobs: 50 | build: 51 | runs-on: ubuntu-latest 52 | steps: 53 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 54 | - name: Check Out Repository 55 | id: checkout_repository 56 | uses: actions/checkout@v2 57 | 58 | # AML Workspace Action 59 | - uses: Azure/aml-workspace@v1 60 | id: aml_workspace 61 | with: 62 | azure_credentials: ${{ secrets.AZURE_CREDENTIALS }} 63 | 64 | # AML Register Model Action 65 | - uses: Azure/aml-registermodel@v1 66 | id: aml_registermodel 67 | with: 68 | azure_credentials: ${{ secrets.AZURE_CREDENTIALS }} 69 | run_id: "" 70 | experiment_name: "" 71 | 72 | # Deploy model in Azure Machine Learning to ACI 73 | - name: Deploy model 74 | id: aml_deploy 75 | uses: Azure/aml-deploy@v1 76 | with: 77 | # required inputs 78 | azure_credentials: ${{ secrets.AZURE_CREDENTIALS }} 79 | model_name: ${{ steps.aml_registermodel.outputs.model_name }} 80 | model_version: ${{ steps.aml_registermodel.outputs.model_version }} 81 | # optional inputs 82 | parameters_file: "deploy.json" 83 | ``` 84 | 85 | ### Inputs 86 | 87 | | Input | Required | Default | Description | 88 | | ----- | -------- | ------- | ----------- | 89 | | azure_credentials | x | - | Output of `az ad sp create-for-rbac --name --role contributor --scopes /subscriptions//resourceGroups/ --sdk-auth`. 
This should be stored in your secrets | 90 | | model_name | x | - | Name of the model that will be deployed. You will get it as an output of register model action as in above example workflow. | 91 | | model_version | x | - | Version of the model that will be deployed. You will get it as an output of register model action as in above example workflow. | 92 | | parameters_file | | `"deploy.json"` | We expect a JSON file in the `.cloud/.azure` folder in root of your repository specifying your model deployment details. If you have want to provide these details in a file other than "deploy.json" you need to provide this input in the action. | 93 | 94 | #### azure_credentials ( Azure Credentials ) 95 | 96 | Azure credentials are required to connect to your Azure Machine Learning Workspace. These may have been created for an action you are already using in your repository, if so, you can skip the steps below. 97 | 98 | Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) on your computer or use the Cloud CLI and execute the following command to generate the required credentials: 99 | 100 | ```sh 101 | # Replace {service-principal-name}, {subscription-id} and {resource-group} with your Azure subscription id and resource group name and any name for your service principle 102 | az ad sp create-for-rbac --name {service-principal-name} \ 103 | --role contributor \ 104 | --scopes /subscriptions/{subscription-id}/resourceGroups/{resource-group} \ 105 | --sdk-auth 106 | ``` 107 | 108 | This will generate the following JSON output: 109 | 110 | ```sh 111 | { 112 | "clientId": "", 113 | "clientSecret": "", 114 | "subscriptionId": "", 115 | "tenantId": "", 116 | (...) 117 | } 118 | ``` 119 | 120 | Add this JSON output as [a secret](https://help.github.com/en/actions/configuring-and-managing-workflows/creating-and-storing-encrypted-secrets#creating-encrypted-secrets) with the name `AZURE_CREDENTIALS` in your GitHub repository. 
121 | 122 | 123 | #### parameters_file (Parameters File) 124 | 125 | The action tries to load a JSON file in the `.cloud/.azure` folder in your repository, which specifies details for the model deployment to your Azure Machine Learning Workspace. By default, the action expects a file with the name `deploy.json`. If your JSON file has a different name, you can specify it with this parameter. Note that none of these values are required and, in the absence, defaults will be created with a combination of the repo name and branch name. 126 | 127 | A sample file can be found in this repository in the folder `.cloud/.azure`. There are separate parameters that are used for the ACI deployment, the AKS deployment and some that are common for both deployment options. 128 | 129 | ##### Common parameters 130 | 131 | | Parameter | Required | Allowed Values | Default | Description | 132 | | ----------------------- | -------- | -------------- | ---------- | ----------- | 133 | | name | | str | - | The name to give the deployed service. Must be unique to the workspace, only consist of lowercase letters, numbers, or dashes, start with a letter, and be between 3 and 32 characters long. | 134 | | deployment_compute_target | (for AKS deployment) | str | null | Name of the compute target to deploy the webservice to. As Azure Container Instances has no associated ComputeTarget, leave this parameter as null to deploy to Azure Container Instances. | 135 | | inference_source_directory | | str | `"code/deploy/"` | The path to the folder that contains all files to create the image. | 136 | | inference_entry_script | | str | `"score.py"` | The path to a local file in your repository that contains the code to run for the image and score the data. This path is relative to the specified source directory. The python script has to define an `init` and a `run` function. A sample can be found in the template repositories. 
| 137 | | conda_file | | str | `"environment.yml"` | The path to a local file in your repository containing a conda environment definition to use for the image. This path is relative to the specified source directory. | 138 | | extra_docker_file_steps | | str | null | The path to a local file in your repository containing additional Docker steps to run when setting up image. This path is relative to the specified source directory. | 139 | | enable_gpu | | bool | false | Indicates whether to enable GPU support in the image. The GPU image must be used on Microsoft Azure Services such as Azure Container Instances, Azure Machine Learning Compute, Azure Virtual Machines, and Azure Kubernetes Service. | 140 | | cuda_version | | str | `"9.1"` if `enable_gpu` is set to true | The Version of CUDA to install for images that need GPU support. The GPU image must be used on Microsoft Azure Services such as Azure Container Instances, Azure Machine Learning Compute, Azure Virtual Machines, and Azure Kubernetes Service. Supported versions are 9.0, 9.1, and 10.0. | 141 | | runtime | | str: `"python"` or `"spark-py"` | `"python"` | The runtime to use for the image. | 142 | | custom_base_image | | str | null | A custom Docker image to be used as base image. If no base image is given then the base image will be used based off of given runtime parameter. | 143 | | model_data_collection_enabled | | bool | false | Whether or not to enable model data collection for this Webservice. | 144 | | authentication_enabled | | bool | false for ACI, true for AKS | Whether or not to enable key auth for this Webservice. | 145 | | app_insights_enabled | | bool | false | Whether or not to enable Application Insights logging for this Webservice. | 146 | | cpu_cores | | float: ]0.0, inf[ | 0.1 | The number of CPU cores to allocate for this Webservice. Can be a decimal. | 147 | | memory_gb | | float: ]0.0, inf[ | 0.5 | The amount of memory (in GB) to allocate for this Webservice. Can be a decimal. 
| 148 | | delete_service_after_deployment | | bool | false | Indicates whether the service gets deleted after the deployment completed successfully. | 149 | | tags | | dict: {"": "", ...} | null | Dictionary of key value tags to give this Webservice. | 150 | | properties | | dict: {"": "", ...} | | Dictionary of key value properties to give this Webservice. These properties cannot be changed after deployment, however new key value pairs can be added. | 151 | | description | | str | null | A description to give this Webservice and image. | 152 | | test_enabled | | bool | false | Whether to run tests for this model deployment and the created real-time endpoint. | 153 | | test_file_path | | str | `"code/test/test.py"` | Path to the python script in your repository in which you define your own tests that you want to run against the webservice endpoint. The GitHub Action fails, if your script fails. | 154 | | test_file_function_name | | str | `"main"` | Name of the function in your python script in your repository in which you define your own tests that you want to run against the webservice endpoint. The function gets the webservice object injected and allows you to run tests against the scoring uri. The GitHub Action fails, if your script fails. | 155 | | profiling_enabled | | bool | false | Whether or not to profile this model for an optimal combination of cpu and memory. To use this functionality, you also have to provide a model profile dataset (`profiling_dataset`). If the parameter is not specified, the Action will try to use the sample input dataset that the model was registered with. Please, note that profiling is a long running operation and can take up to 25 minutes depending on the size of the dataset. More details can be found [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb). 
| 156 | | profiling_dataset | | str | null | Name of the dataset that should be used for model profiling. | 157 | | skip_deployment | | bool | false | Indicates whether the deployment to ACI or AKS should be skipped. This can be used in combination with `create_image` to only create a Docker image that can be used for further deployment. | 158 | | create_image | | str: `"docker"`, `"function_blob"`, `"function_http"` or `"function_service_bus_queue"` | null | Indicates whether a Docker image should be created which can be used for further deployment. | 159 | 160 | Please visit [this website](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model.inferenceconfig?view=azure-ml-py) and [this website](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.model(class)?view=azure-ml-py#deploy-workspace--name--models--inference-config-none--deployment-config-none--deployment-target-none--overwrite-false-) for more details. 161 | 162 | ##### ACI specific parameters 163 | 164 | ACI is the default deployment resource. A sample file for an aci deployment can be found in the `.cloud/.azure` folder. 165 | 166 | | Parameter | Required | Allowed Values | Default | Description | 167 | | ---------------------- | -------- | -------------- | ---------- | ----------- | 168 | | location | | str: [supported region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=container-instances) | workspace location | The Azure region to deploy this Webservice to. | 169 | | ssl_enabled | | bool | false | Whether or not to enable SSL for this Webservice. | 170 | | ssl_cert_pem_file | | str | null | A file path to a file containing cert information for SSL validation. Must provide all three CName, cert file, and key file to enable SSL validation. | 171 | | ssl_key_pem_file | | str | null | A file path to a file containing key information for SSL validation. Must provide all three CName, cert file, and key file to enable SSL validation. 
| 172 | | ssl_cname | | str | null | A CName to use if enabling SSL validation on the cluster. Must provide all three CName, cert file, and key file to enable SSL validation. | 173 | | dns_name_label | | str | null | The DNS name label for the scoring endpoint. If not specified a unique DNS name label will be generated for the scoring endpoint. | 174 | 175 | Please visit [this website](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice.aciwebservice?view=azure-ml-py#deploy-configuration-cpu-cores-none--memory-gb-none--tags-none--properties-none--description-none--location-none--auth-enabled-none--ssl-enabled-none--enable-app-insights-none--ssl-cert-pem-file-none--ssl-key-pem-file-none--ssl-cname-none--dns-name-label-none--primary-key-none--secondary-key-none--collect-model-data-none--cmk-vault-base-url-none--cmk-key-name-none--cmk-key-version-none-) for more details. 176 | 177 | ##### AKS Deployment 178 | 179 | For the deployment of the model to AKS, you must configure an AKS resource and specify the name of the AKS cluster with the `deployment_compute_target` parameter. Additional parameters allow you to finetune your deployment on AKS with options like autoscaling and the liveness probe requirements. These will be set to default parameters if not provided. 180 | 181 | | Parameter | Required | Allowed Values | Default | Description | 182 | | ----------------------- | -------- | -------------- | ---------- | ----------- | 183 | | gpu_cores | | int: [0, inf[ | 1 | The number of GPU cores to allocate for this Webservice. | 184 | | autoscale_enabled | | bool | true if `num_replicas` is null | Whether to enable autoscale for this Webservice. | 185 | | autoscale_min_replicas | | int: [1, inf[ | 1 | The minimum number of containers to use when autoscaling this Webservice. | 186 | | autoscale_max_replicas | | int: [1, inf[ | 10 | The maximum number of containers to use when autoscaling this Webservice. 
| 187 | | autoscale_refresh_seconds | | int: [1, inf[ | 1 | How often the autoscaler should attempt to scale this Webservice (in seconds). | 188 | | autoscale_target_utilization| | int: [1, 100] | 70 | The target utilization (in percent out of 100) the autoscaler should attempt to maintain for this Webservice. | 189 | | scoring_timeout_ms | | int: [1, inf[ | 60000 | A timeout in ms to enforce for scoring calls to this Webservice. | 190 | | replica_max_concurrent_requests| | int: [1, inf[ | 1 | The number of maximum concurrent requests per replica to allow for this Webservice. **Do not change this setting from the default value of 1 unless instructed by Microsoft Technical Support or a member of Azure Machine Learning team.** | 191 | | max_request_wait_time | | int: [0, inf[ | 500 | The maximum amount of time a request will stay in the queue (in milliseconds) before returning a 503 error. | 192 | | num_replicas | | int | null | The number of containers to allocate for this Webservice. **No default, if this parameter is not set then the autoscaler is enabled by default.** | 193 | | period_seconds | | int: [1, inf[ | 10 | How often (in seconds) to perform the liveness probe. | 194 | | initial_delay_seconds | | int: [1, inf[ | 310 | The number of seconds after the container has started before liveness probes are initiated. | 195 | | timeout_seconds | | int: [1, inf[ | 1 | The number of seconds after which the liveness probe times out. | 196 | | success_threshold | | int: [1, inf[ | 1 | The minimum consecutive successes for the liveness probe to be considered successful after having failed. | 197 | | failure_threshold | | int: [1, inf[ | 3 | When a Pod starts and the liveness probe fails, Kubernetes will try failureThreshold times before giving up. | 198 | | namespace | | str | null | The Kubernetes namespace in which to deploy this Webservice: up to 63 lowercase alphanumeric ('a'-'z', '0'-'9') and hyphen ('-') characters. 
The first and last characters cannot be hyphens. | 199 | | token_auth_enabled | | bool | false | Whether to enable Token authentication for this Webservice. If this is enabled, users can access this Webservice by fetching an access token using their Azure Active Directory credentials. | 200 | 201 | Please visit [this website](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice.aks.akswebservice?view=azure-ml-py#deploy-configuration-autoscale-enabled-none--autoscale-min-replicas-none--autoscale-max-replicas-none--autoscale-refresh-seconds-none--autoscale-target-utilization-none--collect-model-data-none--auth-enabled-none--cpu-cores-none--memory-gb-none--enable-app-insights-none--scoring-timeout-ms-none--replica-max-concurrent-requests-none--max-request-wait-time-none--num-replicas-none--primary-key-none--secondary-key-none--tags-none--properties-none--description-none--gpu-cores-none--period-seconds-none--initial-delay-seconds-none--timeout-seconds-none--success-threshold-none--failure-threshold-none--namespace-none--token-auth-enabled-none--compute-target-name-none-) for more details. More Information on autoscaling parameters can be found [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice.aks.autoscaler?view=azure-ml-py) and for liveness probe [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice.aks.livenessproberequirements?view=azure-ml-py). 202 | 203 | ### Outputs 204 | 205 | | Output | Description | 206 | | ------------------- | ------------------------------- | 207 | | service_scoring_uri | Scoring URI of the webservice that was created (only provided if `delete_service_after_deployment` is set to False). | 208 | | service_swagger_uri | Swagger Uri of the webservice that was created (only provided if `delete_service_after_deployment` is set to False). | 209 | | acr_address | The DNS name or IP address (e.g. 
myacr.azurecr.io) of the Azure Container Registry (ACR) (only provided if `create_image` is not None). | 210 | | acr_username | The username for ACR (only provided if `create_image` is not None). | 211 | | acr_password | The password for ACR (only provided if `create_image` is not None). | 212 | | package_location | Full URI of the docker image (e.g. myacr.azurecr.io/azureml/azureml_*) (only provided if `create_image` is not None). | 213 | | profiling_details | Dictionary of details of the model profiling result. This will only be provided, if the model profiling method is used and successfully executed. | 214 | 215 | ### Environment variables 216 | 217 | Certain parameters are considered secrets and should therefore be passed as environment variables from your secrets, if you want to use custom values. 218 | 219 | | Environment variable | Required | Allowed Values | Default | Description | 220 | | --------------------------- | -------- | -------------- | ------- | ----------- | 221 | | CONTAINER_REGISTRY_ADRESS | | str | null | The DNS name or IP address of the Azure Container Registry (ACR). Required, if you specified a `custom_base_image` that is only available in your ACR. | 222 | | CONTAINER_REGISTRY_USERNAME | | str | null | The username for ACR. Required, if you specified a `custom_base_image` that is only available in your ACR. | 223 | | CONTAINER_REGISTRY_PASSWORD | | str | null | The password for ACR. Required, if you specified a `custom_base_image` that is only available in your ACR. | 224 | | PRIMARY_KEY | | str | null | A primary auth key to use for this Webservice. If not specified, Azure will automatically assign a key. | 225 | | SECONDARY_KEY | | str | null | A secondary auth key to use for this Webservice. If not specified, Azure will automatically assign a key. | 226 | | CMK_VAULT_BASE_URL | | str | null | Customer managed Key Vault base url. This value is ACI specific. | 227 | | CMK_KEY_NAME | | str | null | Customer managed key name. 
This value is ACI specific. | 228 | | CMK_KEY_VERSION | | str | null | Customer managed key version. This value is ACI specific. | 229 | 230 | ### Other Azure Machine Learning Actions 231 | 232 | - [aml-workspace](https://github.com/Azure/aml-workspace) - Connects to or creates a new workspace 233 | - [aml-compute](https://github.com/Azure/aml-compute) - Connects to or creates a new compute target in Azure Machine Learning 234 | - [aml-run](https://github.com/Azure/aml-run) - Submits a ScriptRun, an Estimator or a Pipeline to Azure Machine Learning 235 | - [aml-registermodel](https://github.com/Azure/aml-registermodel) - Registers a model to Azure Machine Learning 236 | - [aml-deploy](https://github.com/Azure/aml-deploy) - Deploys a model and creates an endpoint for the model 237 | 238 | # Contributing 239 | 240 | This project welcomes contributions and suggestions. Most contributions require you to agree to a 241 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us 242 | the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. 243 | 244 | When you submit a pull request, a CLA bot will automatically determine whether you need to provide 245 | a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions 246 | provided by the bot. You will only need to do this once across all repos using our CLA. 247 | 248 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 249 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or 250 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
251 | 252 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 
18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd). 
40 | 41 | 42 | -------------------------------------------------------------------------------- /action.yml: -------------------------------------------------------------------------------- 1 | name: "Azure Machine Learning Deploy Action" 2 | description: "Deploy a registered model in your Azure Machine Learning Workspace with this GitHub Action" 3 | author: "azure/gh-aml" 4 | inputs: 5 | azure_credentials: 6 | description: "Paste output of `az ad sp create-for-rbac --name --role contributor --scopes /subscriptions//resourceGroups/ --sdk-auth` as value of secret variable: AZURE_CREDENTIALS" 7 | required: true 8 | model_name: 9 | description: "Name of the model that will be deployed" 10 | required: true 11 | model_version: 12 | description: "Version of the model that will be deployed" 13 | required: true 14 | parameters_file: 15 | description: "JSON file including the parameters for deployment. This looks in the .ml/.azure/ directory" 16 | required: true 17 | default: "deploy.json" 18 | outputs: 19 | service_scoring_uri: 20 | description: "Scoring URI of the webservice that was created (only provided if delete_service_after_deployment is set to False)" 21 | service_swagger_uri: 22 | description: "Swagger URI of the webservice that was created (only provided if delete_service_after_deployment is set to False)" 23 | acr_address: 24 | description: "The DNS name or IP address (e.g. myacr.azurecr.io) of the Azure Container Registry (ACR) (only provided if create_image is not None)" 25 | acr_username: 26 | description: "The username for ACR (only provided if create_image is not None)" 27 | acr_password: 28 | description: "The password for ACR (only provided if create_image is not None)" 29 | package_location: 30 | description: "Full URI of the docker image (e.g. myacr.azurecr.io/azureml/azureml_*) (only provided if create_image is not None)" 31 | profiling_details: 32 | description: "Dictionary of details of the model profiling result. 
This will only be provided, if the model profiling method is used and successfully executed." 33 | branding: 34 | icon: "chevron-up" 35 | color: "blue" 36 | runs: 37 | using: "docker" 38 | image: "Dockerfile" 39 | -------------------------------------------------------------------------------- /code/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | python /code/main.py 6 | -------------------------------------------------------------------------------- /code/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import importlib 5 | 6 | from azureml.contrib.functions import package_http, package_blob, package_service_bus_queue 7 | from azureml.core import Workspace, Model, ContainerRegistry 8 | from azureml.core.compute import ComputeTarget, AksCompute 9 | from azureml.core.model import InferenceConfig 10 | from azureml.core.webservice import AksWebservice, AciWebservice 11 | from azureml.exceptions import ComputeTargetException, AuthenticationException, ProjectSystemException, WebserviceException 12 | from azureml.core.authentication import ServicePrincipalAuthentication 13 | from adal.adal_error import AdalError 14 | from msrest.exceptions import AuthenticationError 15 | from json import JSONDecodeError 16 | from utils import AMLConfigurationException, AMLDeploymentException, get_resource_config, mask_parameter, validate_json, get_dataset 17 | from schemas import azure_credentials_schema, parameters_schema 18 | 19 | 20 | def main(): 21 | # Loading input values 22 | print("::debug::Loading input values") 23 | model_name = os.environ.get("INPUT_MODEL_NAME", default=None) 24 | model_version = os.environ.get("INPUT_MODEL_VERSION", default=None) 25 | 26 | # Casting input values 27 | print("::debug::Casting input values") 28 | try: 29 | model_version = int(model_version) 30 | except TypeError as 
def main():
    """Entry point of the aml-deploy GitHub Action.

    Reads the action inputs and secrets from environment variables, loads the
    Azure ML workspace and the registered model, optionally profiles the
    model, deploys it to ACI or AKS (unless ``skip_deployment`` is set), runs
    user-defined webservice tests and finally packages the model into a
    Docker image if ``create_image`` is requested.

    Raises:
        AMLConfigurationException: for invalid inputs, parameters or test scripts.
        AMLDeploymentException: when deployment, the webservice tests or the
            image creation fail.
    """
    # Loading input values
    print("::debug::Loading input values")
    model_name = os.environ.get("INPUT_MODEL_NAME", default=None)
    model_version = os.environ.get("INPUT_MODEL_VERSION", default=None)

    # Casting input values
    print("::debug::Casting input values")
    try:
        model_version = int(model_version)
    except (TypeError, ValueError) as exception:
        # TypeError: input not provided (None); ValueError: not an integer
        # string. In both cases fall back to the latest model version.
        print(f"::debug::Could not cast model version to int: {exception}")
        model_version = None

    # Loading azure credentials
    print("::debug::Loading azure credentials")
    azure_credentials = os.environ.get("INPUT_AZURE_CREDENTIALS", default="{}")
    try:
        azure_credentials = json.loads(azure_credentials)
    except JSONDecodeError:
        print("::error::Please paste output of `az ad sp create-for-rbac --name --role contributor --scopes /subscriptions//resourceGroups/ --sdk-auth` as value of secret variable: AZURE_CREDENTIALS")
        raise AMLConfigurationException("Incorrect or poorly formed output from azure credentials saved in AZURE_CREDENTIALS secret. See setup in https://github.com/Azure/aml-compute/blob/master/README.md")

    # Checking provided parameters
    print("::debug::Checking provided parameters")
    validate_json(
        data=azure_credentials,
        schema=azure_credentials_schema,
        input_name="AZURE_CREDENTIALS"
    )

    # Mask secret values so that they never show up in the Actions log
    print("::debug::Masking parameters")
    mask_parameter(parameter=azure_credentials.get("tenantId", ""))
    mask_parameter(parameter=azure_credentials.get("clientId", ""))
    mask_parameter(parameter=azure_credentials.get("clientSecret", ""))
    mask_parameter(parameter=azure_credentials.get("subscriptionId", ""))

    # Loading parameters file
    print("::debug::Loading parameters file")
    parameters_file = os.environ.get("INPUT_PARAMETERS_FILE", default="deploy.json")
    parameters_file_path = os.path.join(".cloud", ".azure", parameters_file)
    try:
        with open(parameters_file_path) as f:
            parameters = json.load(f)
    except FileNotFoundError:
        print(f"::debug::Could not find parameter file in {parameters_file_path}. Please provide a parameter file in your repository if you do not want to use default settings (e.g. .cloud/.azure/deploy.json).")
        parameters = {}

    # Checking provided parameters
    print("::debug::Checking provided parameters")
    validate_json(
        data=parameters,
        schema=parameters_schema,
        input_name="PARAMETERS_FILE"
    )

    # Define target cloud from the resource manager endpoint of the credentials
    if azure_credentials.get("resourceManagerEndpointUrl", "").startswith("https://management.usgovcloudapi.net"):
        cloud = "AzureUSGovernment"
    elif azure_credentials.get("resourceManagerEndpointUrl", "").startswith("https://management.chinacloudapi.cn"):
        cloud = "AzureChinaCloud"
    else:
        cloud = "AzureCloud"

    # Loading Workspace
    print("::debug::Loading AML Workspace")
    sp_auth = ServicePrincipalAuthentication(
        tenant_id=azure_credentials.get("tenantId", ""),
        service_principal_id=azure_credentials.get("clientId", ""),
        service_principal_password=azure_credentials.get("clientSecret", ""),
        cloud=cloud
    )
    config_file_path = os.environ.get("GITHUB_WORKSPACE", default=".cloud/.azure")
    config_file_name = "aml_arm_config.json"
    try:
        ws = Workspace.from_config(
            path=config_file_path,
            _file_name=config_file_name,
            auth=sp_auth
        )
    except AuthenticationException as exception:
        print(f"::error::Could not retrieve user token. Please paste output of `az ad sp create-for-rbac --name --role contributor --scopes /subscriptions//resourceGroups/ --sdk-auth` as value of secret variable: AZURE_CREDENTIALS: {exception}")
        # BUGFIX: bare `raise` re-raises the caught exception and preserves
        # its message and traceback (previously the bare class was raised,
        # losing all detail).
        raise
    except AuthenticationError as exception:
        print(f"::error::Microsoft REST Authentication Error: {exception}")
        raise
    except AdalError as exception:
        print(f"::error::Active Directory Authentication Library Error: {exception}")
        raise
    except ProjectSystemException as exception:
        print(f"::error::Workspace authorization failed: {exception}")
        raise

    # Loading model
    print("::debug::Loading model")
    try:
        model = Model(
            workspace=ws,
            name=model_name,
            version=model_version
        )
    except WebserviceException as exception:
        print(f"::error::Could not load model with provided details: {exception}")
        raise AMLConfigurationException(f"Could not load model with provided details: {exception}")

    # Creating inference config
    print("::debug::Creating inference config")
    if os.environ.get("CONTAINER_REGISTRY_ADRESS", None) is not None:
        # A custom base image lives in a private ACR; pass its credentials on.
        container_registry = ContainerRegistry()
        container_registry.address = os.environ.get("CONTAINER_REGISTRY_ADRESS", None)
        container_registry.username = os.environ.get("CONTAINER_REGISTRY_USERNAME", None)
        container_registry.password = os.environ.get("CONTAINER_REGISTRY_PASSWORD", None)
    else:
        container_registry = None

    try:
        inference_config = InferenceConfig(
            entry_script=parameters.get("inference_entry_script", "score.py"),
            runtime=parameters.get("runtime", "python"),
            conda_file=parameters.get("conda_file", "environment.yml"),
            extra_docker_file_steps=parameters.get("extra_docker_file_steps", None),
            source_directory=parameters.get("inference_source_directory", "code/deploy/"),
            enable_gpu=parameters.get("enable_gpu", None),
            description=parameters.get("description", None),
            base_image=parameters.get("custom_base_image", None),
            base_image_registry=container_registry,
            cuda_version=parameters.get("cuda_version", None)
        )
    except (WebserviceException, TypeError) as exception:
        # Fall back to a no-code deployment if the inference config cannot be
        # created from the provided parameters.
        print(f"::debug::Failed to create InferenceConfig. Trying to create no code deployment: {exception}")
        inference_config = None

    # Skip deployment if only a Docker image should be created
    if not parameters.get("skip_deployment", False):
        # Default service name derived from repository and branch name.
        # BUGFIX: default to "" so a missing env variable does not raise
        # AttributeError on .split().
        repository_name = os.environ.get("GITHUB_REPOSITORY", "").split("/")[-1]
        branch_name = os.environ.get("GITHUB_REF", "").split("/")[-1]
        default_service_name = f"{repository_name}-{branch_name}".lower().replace("_", "-")
        service_name = parameters.get("name", default_service_name)[:32]

        # Loading run config
        print("::debug::Loading run config")
        model_resource_config = model.resource_configuration
        cpu_cores = get_resource_config(
            config=parameters.get("cpu_cores", None),
            resource_config=model_resource_config,
            config_name="cpu"
        )
        memory_gb = get_resource_config(
            config=parameters.get("memory_gb", None),
            resource_config=model_resource_config,
            config_name="memory_in_gb"
        )
        gpu_cores = get_resource_config(
            config=parameters.get("gpu_cores", None),
            resource_config=model_resource_config,
            config_name="gpu"
        )

        # Profiling model
        print("::debug::Profiling model")
        if parameters.get("profiling_enabled", False):
            # Getting profiling dataset; fall back to the model's registered
            # sample input dataset if none was provided.
            profiling_dataset = get_dataset(
                workspace=ws,
                name=parameters.get("profiling_dataset", None)
            )
            if profiling_dataset is None:
                profiling_dataset = model.sample_input_dataset

            # Profiling model
            try:
                model_profile = Model.profile(
                    workspace=ws,
                    profile_name=f"{service_name}-profile"[:32],
                    models=[model],
                    inference_config=inference_config,
                    input_dataset=profiling_dataset
                )
                model_profile.wait_for_completion(show_output=True)

                # Overwriting resource configuration with the recommendation
                cpu_cores = model_profile.recommended_cpu
                memory_gb = model_profile.recommended_memory

                # Setting output
                profiling_details = model_profile.get_details()
                print(f"::set-output name=profiling_details::{profiling_details}")
            except Exception as exception:
                # Profiling is best-effort: keep the configured resources.
                print(f"::warning::Failed to profile model. Skipping profiling and moving on to deployment: {exception}")

        # Loading deployment target
        print("::debug::Loading deployment target")
        try:
            deployment_target = ComputeTarget(
                workspace=ws,
                name=parameters.get("deployment_compute_target", "")
            )
        except (ComputeTargetException, TypeError):
            # No (valid) compute target provided -> deploy to ACI below.
            deployment_target = None

        # Creating deployment config
        print("::debug::Creating deployment config")
        if isinstance(deployment_target, AksCompute):
            deployment_config = AksWebservice.deploy_configuration(
                autoscale_enabled=parameters.get("autoscale_enabled", None),
                autoscale_min_replicas=parameters.get("autoscale_min_replicas", None),
                autoscale_max_replicas=parameters.get("autoscale_max_replicas", None),
                autoscale_refresh_seconds=parameters.get("autoscale_refresh_seconds", None),
                autoscale_target_utilization=parameters.get("autoscale_target_utilization", None),
                collect_model_data=parameters.get("model_data_collection_enabled", None),
                auth_enabled=parameters.get("authentication_enabled", None),
                cpu_cores=cpu_cores,
                memory_gb=memory_gb,
                enable_app_insights=parameters.get("app_insights_enabled", None),
                scoring_timeout_ms=parameters.get("scoring_timeout_ms", None),
                replica_max_concurrent_requests=parameters.get("replica_max_concurrent_requests", None),
                max_request_wait_time=parameters.get("max_request_wait_time", None),
                num_replicas=parameters.get("num_replicas", None),
                primary_key=os.environ.get("PRIMARY_KEY", None),
                secondary_key=os.environ.get("SECONDARY_KEY", None),
                tags=parameters.get("tags", None),
                properties=parameters.get("properties", None),
                description=parameters.get("description", None),
                gpu_cores=gpu_cores,
                period_seconds=parameters.get("period_seconds", None),
                initial_delay_seconds=parameters.get("initial_delay_seconds", None),
                timeout_seconds=parameters.get("timeout_seconds", None),
                success_threshold=parameters.get("success_threshold", None),
                failure_threshold=parameters.get("failure_threshold", None),
                namespace=parameters.get("namespace", None),
                token_auth_enabled=parameters.get("token_auth_enabled", None)
            )
        else:
            deployment_config = AciWebservice.deploy_configuration(
                cpu_cores=cpu_cores,
                memory_gb=memory_gb,
                tags=parameters.get("tags", None),
                properties=parameters.get("properties", None),
                description=parameters.get("description", None),
                location=parameters.get("location", None),
                auth_enabled=parameters.get("authentication_enabled", None),
                ssl_enabled=parameters.get("ssl_enabled", None),
                enable_app_insights=parameters.get("app_insights_enabled", None),
                ssl_cert_pem_file=parameters.get("ssl_cert_pem_file", None),
                ssl_key_pem_file=parameters.get("ssl_key_pem_file", None),
                ssl_cname=parameters.get("ssl_cname", None),
                dns_name_label=parameters.get("dns_name_label", None),
                primary_key=os.environ.get("PRIMARY_KEY", None),
                secondary_key=os.environ.get("SECONDARY_KEY", None),
                collect_model_data=parameters.get("model_data_collection_enabled", None),
                cmk_vault_base_url=os.environ.get("CMK_VAULT_BASE_URL", None),
                cmk_key_name=os.environ.get("CMK_KEY_NAME", None),
                cmk_key_version=os.environ.get("CMK_KEY_VERSION", None)
            )

        # Deploying model
        print("::debug::Deploying model")
        service = None
        try:
            service = Model.deploy(
                workspace=ws,
                name=service_name,
                models=[model],
                inference_config=inference_config,
                deployment_config=deployment_config,
                deployment_target=deployment_target,
                overwrite=True
            )
            service.wait_for_deployment(show_output=True)
        except WebserviceException as exception:
            print(f"::error::Model deployment failed with exception: {exception}")
            # BUGFIX: `service` is unbound if Model.deploy itself raised, which
            # previously caused a NameError that masked the real error. Only
            # fetch logs when a service object exists.
            service_logs = service.get_logs() if service is not None else None
            raise AMLDeploymentException(f"Model deployment failed logs: {service_logs} \nexception: {exception}")

        # Checking status of service
        print("::debug::Checking status of service")
        if service.state != "Healthy":
            service_logs = service.get_logs()
            print(f"::error::Model deployment failed with state '{service.state}': {service_logs}")
            raise AMLDeploymentException(f"Model deployment failed with state '{service.state}': {service_logs}")

        if parameters.get("test_enabled", False):
            # Testing service
            print("::debug::Testing service")
            root = os.environ.get("GITHUB_WORKSPACE", default=None)
            test_file_path = parameters.get("test_file_path", "code/test/test.py")
            test_file_function_name = parameters.get("test_file_function_name", "main")

            print("::debug::Adding root to system path")
            sys.path.insert(1, f"{root}")

            print("::debug::Importing module")
            # BUGFIX: `importlib.util` is a submodule that a plain
            # `import importlib` does not guarantee to bind; import it
            # explicitly before use.
            import importlib.util
            test_file_path = f"{test_file_path}.py" if not test_file_path.endswith(".py") else test_file_path
            try:
                test_spec = importlib.util.spec_from_file_location(
                    name="testmodule",
                    location=test_file_path
                )
                test_module = importlib.util.module_from_spec(spec=test_spec)
                test_spec.loader.exec_module(test_module)
                test_function = getattr(test_module, test_file_function_name, None)
            except (ModuleNotFoundError, FileNotFoundError, AttributeError) as exception:
                print(f"::error::Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
                raise AMLConfigurationException(f"Could not load python script or function in your repository which defines the web service tests (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")

            # Running the user-provided webservice tests
            print("::debug::Loading experiment config")
            try:
                test_function(service)
            except TypeError as exception:
                print(f"::error::Could not load experiment config from your module (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
                raise AMLConfigurationException(f"Could not load experiment config from your module (Script: /{test_file_path}, Function: {test_file_function_name}()): {exception}")
            except Exception as exception:
                print(f"::error::The webservice tests did not complete successfully: {exception}")
                raise AMLDeploymentException(f"The webservice tests did not complete successfully: {exception}")

        # Deleting service if desired
        if parameters.get("delete_service_after_deployment", False):
            service.delete()
        else:
            # Creating outputs
            print("::debug::Creating outputs")
            print(f"::set-output name=service_scoring_uri::{service.scoring_uri}")
            print(f"::set-output name=service_swagger_uri::{service.swagger_uri}")

    # Creating Docker image
    create_image = parameters.get("create_image", None)
    if create_image is not None:
        package = None
        try:
            # Packaging model
            if create_image == "docker":
                package = Model.package(
                    workspace=ws,
                    models=[model],
                    inference_config=inference_config,
                    generate_dockerfile=False
                )
            elif create_image == "function_blob":
                package = package_blob(
                    workspace=ws,
                    models=[model],
                    inference_config=inference_config,
                    generate_dockerfile=False,
                    input_path=os.environ.get("FUNCTION_BLOB_INPUT"),
                    output_path=os.environ.get("FUNCTION_BLOB_OUTPUT")
                )
            elif create_image == "function_http":
                package = package_http(
                    workspace=ws,
                    models=[model],
                    inference_config=inference_config,
                    generate_dockerfile=False,
                    auth_level=os.environ.get("FUNCTION_HTTP_AUTH_LEVEL")
                )
            elif create_image == "function_service_bus_queue":
                package = package_service_bus_queue(
                    workspace=ws,
                    models=[model],
                    inference_config=inference_config,
                    generate_dockerfile=False,
                    input_queue_name=os.environ.get("FUNCTION_SERVICE_BUS_QUEUE_INPUT"),
                    output_queue_name=os.environ.get("FUNCTION_SERVICE_BUS_QUEUE_OUTPUT")
                )
            else:
                # BUGFIX: an unknown value previously left `package` unbound
                # and crashed with a NameError below instead of a clear error.
                raise AMLConfigurationException(f"Invalid value for create_image: '{create_image}'. Allowed values are: 'docker', 'function_blob', 'function_http' and 'function_service_bus_queue'.")

            # Getting container registry details and masking the credentials
            acr = package.get_container_registry()
            mask_parameter(parameter=acr.address)
            mask_parameter(parameter=acr.username)
            mask_parameter(parameter=acr.password)

            # Wait for completion and pull image
            package.wait_for_creation(show_output=True)

            # Creating additional outputs
            print("::debug::Creating outputs")
            print(f"::set-output name=acr_address::{acr.address}")
            print(f"::set-output name=acr_username::{acr.username}")
            print(f"::set-output name=acr_password::{acr.password}")
            print(f"::set-output name=package_location::{package.location}")
        except WebserviceException as exception:
            print(f"::error::Image creation failed with exception: {exception}")
            # BUGFIX: `package` is unbound if packaging itself raised; only
            # fetch logs when a package object exists.
            package_logs = package.get_logs() if package is not None else None
            raise AMLDeploymentException(f"Image creation failed with logs: {package_logs}")
    print("::debug::Successfully finished Azure Machine Learning Deploy Action")
input_queue_name=os.environ.get("FUNCTION_SERVICE_BUS_QUEUE_INPUT"), 395 | output_queue_name=os.environ.get("FUNCTION_SERVICE_BUS_QUEUE_OUTPUT") 396 | ) 397 | 398 | # Getting container registry details 399 | acr = package.get_container_registry() 400 | mask_parameter(parameter=acr.address) 401 | mask_parameter(parameter=acr.username) 402 | mask_parameter(parameter=acr.password) 403 | 404 | # Wait for completion and pull image 405 | package.wait_for_creation(show_output=True) 406 | 407 | # Creating additional outputs 408 | print("::debug::Creating outputs") 409 | print(f"::set-output name=acr_address::{acr.address}") 410 | print(f"::set-output name=acr_username::{acr.username}") 411 | print(f"::set-output name=acr_password::{acr.password}") 412 | print(f"::set-output name=package_location::{package.location}") 413 | except WebserviceException as exception: 414 | print(f"::error::Image creation failed with exception: {exception}") 415 | package_logs = package.get_logs() 416 | raise AMLDeploymentException(f"Image creation failed with logs: {package_logs}") 417 | print("::debug::Successfully finished Azure Machine Learning Deploy Action") 418 | 419 | 420 | if __name__ == "__main__": 421 | main() 422 | -------------------------------------------------------------------------------- /code/schemas.py: -------------------------------------------------------------------------------- 1 | azure_credentials_schema = { 2 | "$id": "http://azure-ml.com/schemas/azure_credentials.json", 3 | "$schema": "http://json-schema.org/schema", 4 | "title": "azure_credentials", 5 | "description": "JSON specification for your azure credentials", 6 | "type": "object", 7 | "required": ["clientId", "clientSecret", "subscriptionId", "tenantId"], 8 | "properties": { 9 | "clientId": { 10 | "type": "string", 11 | "description": "The client ID of the service principal." 12 | }, 13 | "clientSecret": { 14 | "type": "string", 15 | "description": "The client secret of the service principal." 
16 | }, 17 | "subscriptionId": { 18 | "type": "string", 19 | "description": "The subscription ID that should be used." 20 | }, 21 | "tenantId": { 22 | "type": "string", 23 | "description": "The tenant ID of the service principal." 24 | } 25 | } 26 | } 27 | 28 | parameters_schema = { 29 | "$id": "http://azure-ml.com/schemas/deploy.json", 30 | "$schema": "http://json-schema.org/schema", 31 | "title": "aml-registermodel", 32 | "description": "JSON specification for your deploy details", 33 | "type": "object", 34 | "properties": { 35 | "name": { 36 | "type": "string", 37 | "description": "The name to give the deployed service.", 38 | "minLength": 3, 39 | "maxLength": 32 40 | }, 41 | "deployment_compute_target": { 42 | "type": "string", 43 | "description": "Name of the compute target to deploy the webservice to." 44 | }, 45 | "inference_source_directory": { 46 | "type": "string", 47 | "description": "The path to the folder that contains all files to create the image." 48 | }, 49 | "inference_entry_script": { 50 | "type": "string", 51 | "description": "The path to a local file in your repository that contains the code to run for the image and score the data." 52 | }, 53 | "test_enabled": { 54 | "type": "boolean", 55 | "description": "Whether to run tests for this model deployment and the created real-time endpoint." 56 | }, 57 | "test_file_path": { 58 | "type": "string", 59 | "description": "Path to the python script in your repository in which you define your own tests that you want to run against the webservice endpoint." 60 | }, 61 | "test_file_function_name": { 62 | "type": "string", 63 | "description": "Name of the function in your python script in your repository in which you define your own tests that you want to run against the webservice endpoint." 64 | }, 65 | "conda_file": { 66 | "type": "string", 67 | "description": "The path to a local file in your repository containing a conda environment definition to use for the image." 
68 | }, 69 | "extra_docker_file_steps": { 70 | "type": "string", 71 | "description": "The path to a local file in your repository containing additional Docker steps to run when setting up image." 72 | }, 73 | "enable_gpu": { 74 | "type": "boolean", 75 | "description": "Indicates whether to enable GPU support in the image." 76 | }, 77 | "cuda_version": { 78 | "type": "string", 79 | "description": "The Version of CUDA to install for images that need GPU support." 80 | }, 81 | "model_data_collection_enabled": { 82 | "type": "boolean", 83 | "description": "Whether or not to enable model data collection for this Webservice." 84 | }, 85 | "authentication_enabled": { 86 | "type": "boolean", 87 | "description": "Whether or not to enable key auth for this Webservice." 88 | }, 89 | "app_insights_enabled": { 90 | "type": "boolean", 91 | "description": "Whether or not to enable Application Insights logging for this Webservice." 92 | }, 93 | "runtime": { 94 | "type": "string", 95 | "description": "The runtime to use for the image.", 96 | "pattern": "python|spark-py" 97 | }, 98 | "custom_base_image": { 99 | "type": "string", 100 | "description": "A custom Docker image to be used as base image." 101 | }, 102 | "profiling_enabled": { 103 | "type": "boolean", 104 | "description": "Whether or not to profile this model for an optimal combination of cpu and memory." 105 | }, 106 | "profiling_dataset": { 107 | "type": "string", 108 | "description": "The name of the dataset that should be used for profiling." 
109 | }, 110 | "cpu_cores": { 111 | "type": "number", 112 | "description": "The number of CPU cores to allocate for this Webservice.", 113 | "exclusiveMinimum": 0.0 114 | }, 115 | "memory_gb": { 116 | "type": "number", 117 | "description": "The amount of memory (in GB) to allocate for this Webservice.", 118 | "exclusiveMinimum": 0.0 119 | }, 120 | "delete_service_after_deployment": { 121 | "type": "boolean", 122 | "description": "Indicates whether the service gets deleted after the deployment completed successfully." 123 | }, 124 | "skip_deployment": { 125 | "type": "boolean", 126 | "description": "Indicates whether the deployment to ACI or AKS should be skipped. This can be used in combination with `create_image` to only create a Docker image that can be used for further deployment." 127 | }, 128 | "create_image": { 129 | "type": "string", 130 | "description": "Indicates whether a Docker image should be created which can be used for further deployment.", 131 | "pattern": "docker|function_blob|function_http|function_service_bus_queue" 132 | }, 133 | "tags": { 134 | "type": "object", 135 | "description": "Dictionary of key value tags to give this Webservice." 136 | }, 137 | "properties": { 138 | "type": "object", 139 | "description": "Dictionary of key value properties to give this Webservice." 140 | }, 141 | "description": { 142 | "type": "string", 143 | "description": "A description to give this Webservice and image." 144 | }, 145 | "location": { 146 | "type": "string", 147 | "description": "The Azure region to deploy this Webservice to." 148 | }, 149 | "ssl_enabled": { 150 | "type": "boolean", 151 | "description": "Whether or not to enable SSL for this Webservice." 152 | }, 153 | "ssl_cert_pem_file": { 154 | "type": "string", 155 | "description": "A file path to a file containing cert information for SSL validation." 
156 | }, 157 | "ssl_key_pem_file": { 158 | "type": "string", 159 | "description": "A file path to a file containing key information for SSL validation." 160 | }, 161 | "ssl_cname": { 162 | "type": "string", 163 | "description": "A CName to use if enabling SSL validation on the cluster." 164 | }, 165 | "dns_name_label": { 166 | "type": "string", 167 | "description": "The DNS name label for the scoring endpoint." 168 | }, 169 | "gpu_cores": { 170 | "type": "integer", 171 | "description": "The number of GPU cores to allocate for this Webservice.", 172 | "minimum": 0 173 | }, 174 | "autoscale_enabled": { 175 | "type": "boolean", 176 | "description": "Whether to enable autoscale for this Webservice." 177 | }, 178 | "autoscale_min_replicas": { 179 | "type": "integer", 180 | "description": "The minimum number of containers to use when autoscaling this Webservice.", 181 | "minimum": 1 182 | }, 183 | "autoscale_max_replicas": { 184 | "type": "integer", 185 | "description": "The maximum number of containers to use when autoscaling this Webservice.", 186 | "minimum": 1 187 | }, 188 | "autoscale_refresh_seconds": { 189 | "type": "integer", 190 | "description": "How often the autoscaler should attempt to scale this Webservice (in seconds).", 191 | "minimum": 1 192 | }, 193 | "autoscale_target_utilization": { 194 | "type": "integer", 195 | "description": "The target utilization (in percent out of 100) the autoscaler should attempt to maintain for this Webservice.", 196 | "minimum": 1, 197 | "maximum": 100 198 | }, 199 | "scoring_timeout_ms": { 200 | "type": "integer", 201 | "description": "A timeout in ms to enforce for scoring calls to this Webservice.", 202 | "minimum": 1 203 | }, 204 | "replica_max_concurrent_requests": { 205 | "type": "integer", 206 | "description": "The number of maximum concurrent requests per replica to allow for this Webservice.", 207 | "minimum": 1 208 | }, 209 | "max_request_wait_time": { 210 | "type": "integer", 211 | "description": "The maximum 
amount of time a request will stay in the queue (in milliseconds) before returning a 503 error.", 212 | "minimum": 0 213 | }, 214 | "num_replicas": { 215 | "type": "integer", 216 | "description": "The number of containers to allocate for this Webservice." 217 | }, 218 | "period_seconds": { 219 | "type": "integer", 220 | "description": "How often (in seconds) to perform the liveness probe.", 221 | "minimum": 1 222 | }, 223 | "initial_delay_seconds": { 224 | "type": "integer", 225 | "description": "The number of seconds after the container has started before liveness probes are initiated.", 226 | "minimum": 1 227 | }, 228 | "timeout_seconds": { 229 | "type": "integer", 230 | "description": "The number of seconds after which the liveness probe times out.", 231 | "minimum": 1 232 | }, 233 | "success_threshold": { 234 | "type": "integer", 235 | "description": "The minimum consecutive successes for the liveness probe to be considered successful after having failed.", 236 | "minimum": 1 237 | }, 238 | "failure_threshold": { 239 | "type": "integer", 240 | "description": "When a Pod starts and the liveness probe fails, Kubernetes will try failureThreshold times before giving up.", 241 | "minimum": 1 242 | }, 243 | "namespace": { 244 | "type": "string", 245 | "description": "The Kubernetes namespace in which to deploy this Webservice.", 246 | "maxLength": 63, 247 | "pattern": "([a-z0-9-])+" 248 | }, 249 | "token_auth_enabled": { 250 | "type": "boolean", 251 | "description": "Whether to enable Token authentication for this Webservice." 
252 | } 253 | } 254 | } 255 | -------------------------------------------------------------------------------- /code/utils.py: -------------------------------------------------------------------------------- 1 | import jsonschema 2 | from azureml.core import Dataset 3 | 4 | 5 | class AMLConfigurationException(Exception): 6 | pass 7 | 8 | 9 | class AMLDeploymentException(Exception): 10 | pass 11 | 12 | 13 | def validate_json(data, schema, input_name): 14 | validator = jsonschema.Draft7Validator(schema) 15 | errors = list(validator.iter_errors(data)) 16 | if len(errors) > 0: 17 | for error in errors: 18 | print(f"::error::JSON validation error: {error}") 19 | raise AMLConfigurationException(f"JSON validation error for '{input_name}'. Provided object does not match schema. Please check the output for more details.") 20 | else: 21 | print(f"::debug::JSON validation passed for '{input_name}'. Provided object does match schema.") 22 | 23 | 24 | def get_resource_config(config, resource_config, config_name): 25 | if config is not None: 26 | return config 27 | elif resource_config is not None: 28 | return resource_config.serialize().get(config_name, None) 29 | return None 30 | 31 | 32 | def mask_parameter(parameter): 33 | print(f"::add-mask::{parameter}") 34 | 35 | 36 | def get_dataset(workspace, name): 37 | try: 38 | dataset = Dataset.get_by_name( 39 | workspace=workspace, 40 | name=name, 41 | version="latest" 42 | ) 43 | except Exception: 44 | dataset = None 45 | return dataset 46 | -------------------------------------------------------------------------------- /tests/deploy/environment.yml: -------------------------------------------------------------------------------- 1 | # Conda environment specification. 
Details about the Conda environment file format: 2 | # https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually 3 | 4 | name: deployment_env 5 | dependencies: 6 | - python=3.6.2 7 | - scikit-learn=0.22.2 8 | - numpy=1.18.1 9 | - pip: 10 | - azureml-defaults==1.1.5 11 | - azureml-monitoring==0.1.0a18 12 | - inference-schema==1.0.2 13 | - inference-schema[numpy-support]==1.0.2 14 | channels: 15 | - conda-forge 16 | -------------------------------------------------------------------------------- /tests/deploy/score.py: -------------------------------------------------------------------------------- 1 | import joblib 2 | import numpy as np 3 | 4 | from azureml.core import Model 5 | from azureml.monitoring import ModelDataCollector 6 | from inference_schema.schema_decorators import input_schema, output_schema 7 | from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType 8 | from inference_schema.parameter_types.standard_py_parameter_type import StandardPythonParameterType 9 | 10 | 11 | # The init() method is called once, when the web service starts up. 12 | # Typically you would deserialize the model file, as shown here using joblib, 13 | # and store it in a global variable so your run() method can access it later. 14 | def init(): 15 | global model 16 | global inputs_dc, prediction_dc 17 | # The AZUREML_MODEL_DIR environment variable indicates 18 | # a directory containing the model file you registered. 19 | model_path = Model.get_model_path(model_name="mymodel") 20 | model = joblib.load(model_path) 21 | inputs_dc = ModelDataCollector("sample-model", designation="inputs", feature_names=["feat1", "feat2", "feat3", "feat4"]) 22 | prediction_dc = ModelDataCollector("sample-model", designation="predictions", feature_names=["prediction"]) 23 | 24 | 25 | # The run() method is called each time a request is made to the scoring API. 
26 | # Shown here are the optional input_schema and output_schema decorators 27 | # from the inference-schema pip package. Using these decorators on your 28 | # run() method parses and validates the incoming payload against 29 | # the example input you provide here. This will also generate a Swagger 30 | # API document for your web service. 31 | @input_schema('data', NumpyParameterType(np.array([[0.1, 1.2, 2.3, 3.4]]))) 32 | @output_schema(StandardPythonParameterType({'predict': [['Iris-virginica']]})) 33 | def run(data): 34 | # Use the model object loaded by init(). 35 | result = model.predict(data) 36 | inputs_dc.collect(data) # this call is saving our input data into Azure Blob 37 | prediction_dc.collect(result) # this call is saving our prediction data into Azure Blob 38 | 39 | # You can return any JSON-serializable object. 40 | return {"predict": result.tolist()} 41 | -------------------------------------------------------------------------------- /tests/test_main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import pytest 4 | 5 | myPath = os.path.dirname(os.path.abspath(__file__)) 6 | sys.path.insert(0, os.path.join(myPath, "..", "code")) 7 | 8 | from main import main 9 | from utils import AMLConfigurationException 10 | 11 | 12 | def test_main_no_input(): 13 | """ 14 | Unit test to check the main function with no inputs 15 | """ 16 | with pytest.raises(AMLConfigurationException): 17 | assert main() 18 | 19 | 20 | def test_main_invalid_azure_credentials(): 21 | os.environ["INPUT_AZURE_CREDENTIALS"] = "" 22 | with pytest.raises(AMLConfigurationException): 23 | assert main() 24 | 25 | 26 | def test_main_invalid_parameters_file(): 27 | os.environ["INPUT_AZURE_CREDENTIALS"] = """{ 28 | 'clientId': 'test', 29 | 'clientSecret': 'test', 30 | 'subscriptionId': 'test', 31 | 'tenantId': 'test' 32 | }""" 33 | os.environ["INPUT_PARAMETERS_FILE"] = "wrongfile.json" 34 | with
pytest.raises(AMLConfigurationException): 35 | assert main() 36 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import pytest 4 | 5 | myPath = os.path.dirname(os.path.abspath(__file__)) 6 | sys.path.insert(0, os.path.join(myPath, "..", "code")) 7 | 8 | from utils import validate_json, AMLConfigurationException 9 | from schemas import azure_credentials_schema 10 | 11 | 12 | def test_validate_json_valid_inputs(): 13 | """ 14 | Unit test to check the validate_json function with valid inputs 15 | """ 16 | json_object = { 17 | "clientId": "", 18 | "clientSecret": "", 19 | "subscriptionId": "", 20 | "tenantId": "" 21 | } 22 | schema_object = azure_credentials_schema 23 | validate_json( 24 | data=json_object, 25 | schema=schema_object, 26 | input_name="PARAMETERS_FILE" 27 | ) 28 | 29 | 30 | def test_validate_json_invalid_json(): 31 | """ 32 | Unit test to check the validate_json function with invalid json_object inputs 33 | """ 34 | json_object = { 35 | "clientId": "", 36 | "clientSecret": "", 37 | "subscriptionId": "" 38 | } 39 | schema_object = azure_credentials_schema 40 | with pytest.raises(AMLConfigurationException): 41 | assert validate_json( 42 | data=json_object, 43 | schema=schema_object, 44 | input_name="PARAMETERS_FILE" 45 | ) 46 | 47 | 48 | def test_validate_json_invalid_schema(): 49 | """ 50 | Unit test to check the validate_json function with invalid schema inputs 51 | """ 52 | json_object = {} 53 | schema_object = {} 54 | with pytest.raises(Exception): 55 | assert validate_json( 56 | data=json_object, 57 | schema=schema_object, 58 | input_name="PARAMETERS_FILE" 59 | ) 60 | --------------------------------------------------------------------------------