├── .gitignore
├── APIDeliverables.md
├── AcceptanceCriteria.md
├── CODEOFCONDUCT.md
├── Containers
│   ├── base-py
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── ai4e_api_tools
│   │   │   ├── ai4e_app_insights.py
│   │   │   ├── ai4e_app_insights_context.py
│   │   │   ├── ai4e_app_insights_wrapper.py
│   │   │   ├── ai4e_service.py
│   │   │   ├── azure_monitor_logger.py
│   │   │   └── task_management
│   │   │       └── api_task.py
│   │   └── requirements.txt
│   ├── base-r
│   │   ├── Dockerfile
│   │   ├── ai4e_api_tools
│   │   │   ├── ai4e_app_insights.R
│   │   │   └── task_management
│   │   │       └── api_task.R
│   │   └── requirements.txt
│   ├── blob-py
│   │   ├── Dockerfile
│   │   ├── blob_mount.json
│   │   ├── readme.txt
│   │   └── startup.sh
│   ├── blob-r
│   │   ├── Dockerfile
│   │   ├── blob_mount.json
│   │   └── startup.sh
│   └── common
│       ├── aad_blob.py
│       ├── blob_mounting
│       │   └── blob_mounter.py
│       └── sas_blob.py
├── Documentation
│   └── landcover_api_spec_swagger.0.1.json
├── Examples
│   ├── base-py
│   │   ├── Dockerfile
│   │   ├── runserver.py
│   │   ├── startup.sh
│   │   └── supervisord.conf
│   ├── base-r
│   │   ├── Dockerfile
│   │   ├── my_api
│   │   │   ├── Observations.csv
│   │   │   ├── api_example.R
│   │   │   └── plumber_run.R
│   │   ├── startup.sh
│   │   └── supervisord.conf
│   ├── blob-mount-py
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── blob_mount.json
│   │   ├── my_api
│   │   │   └── runserver.py
│   │   ├── startup.sh
│   │   └── supervisord.conf
│   ├── helpers
│   │   └── aad_blob_helper.py
│   ├── pytorch
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── pytorch_api
│   │   │   ├── pytorch_classifier.py
│   │   │   └── runserver.py
│   │   ├── startup.sh
│   │   └── supervisord.conf
│   ├── screenshots
│   │   ├── CustomVisionIterationID.jpg
│   │   ├── CustomVisionSettings.jpg
│   │   ├── QuickstartResourceGroup.jpg
│   │   ├── api_key1.PNG
│   │   ├── api_key2.PNG
│   │   ├── app_insights1.PNG
│   │   ├── app_insights2.PNG
│   │   ├── app_insights3.PNG
│   │   ├── blob1.PNG
│   │   ├── blob4.PNG
│   │   ├── blob_key.PNG
│   │   ├── blob_upload.PNG
│   │   ├── create_ACR-1.png
│   │   ├── create_ACR-2.png
│   │   ├── create_ACR-3.png
│   │   ├── postman_header_content_type.png
│   │   ├── postman_json.PNG
│   │   ├── postman_pytorch1.PNG
│   │   ├── postman_pytorch2.PNG
│   │   ├── postman_pytorch_api.png
│   │   ├── postman_tf_api.png
│   │   ├── postman_tf_async_api.png
│   │   ├── resource_group.PNG
│   │   ├── resource_group_3.PNG
│   │   ├── run_ACI-1.png
│   │   ├── startup_fix.PNG
│   │   └── storage_explorer_tf_out.png
│   └── tensorflow
│       ├── Dockerfile
│       ├── README.md
│       ├── startup.sh
│       ├── supervisord.conf
│       └── tf_iNat_api
│           ├── runserver.py
│           └── tf_detector.py
├── JupyterNotebook.md
├── LICENSE
├── Notebooks
│   ├── demo_image.jpg
│   ├── hackathon.ipynb
│   ├── lab_manual.ipynb
│   └── template-demo.ipynb
├── README.md
└── SECURITY.md
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.suo
8 | *.user
9 | *.userosscache
10 | *.sln.docstates
11 |
12 | # User-specific files (MonoDevelop/Xamarin Studio)
13 | *.userprefs
14 |
15 | # Build results
16 | [Dd]ebug/
17 | [Dd]ebugPublic/
18 | [Rr]elease/
19 | [Rr]eleases/
20 | x64/
21 | x86/
22 | bld/
23 | [Bb]in/
24 | [Oo]bj/
25 | [Ll]og/
26 |
27 | # Visual Studio 2015/2017 cache/options directory
28 | .vs/
29 | # Uncomment if you have tasks that create the project's static files in wwwroot
30 | #wwwroot/
31 |
32 | # Visual Studio 2017 auto generated files
33 | Generated\ Files/
34 |
35 | # MSTest test Results
36 | [Tt]est[Rr]esult*/
37 | [Bb]uild[Ll]og.*
38 |
39 | # NUNIT
40 | *.VisualState.xml
41 | TestResult.xml
42 |
43 | # Build Results of an ATL Project
44 | [Dd]ebugPS/
45 | [Rr]eleasePS/
46 | dlldata.c
47 |
48 | # Benchmark Results
49 | BenchmarkDotNet.Artifacts/
50 |
51 | # .NET Core
52 | project.lock.json
53 | project.fragment.lock.json
54 | artifacts/
55 | **/Properties/launchSettings.json
56 |
57 | # StyleCop
58 | StyleCopReport.xml
59 |
60 | # Files built by Visual Studio
61 | *_i.c
62 | *_p.c
63 | *_i.h
64 | *.ilk
65 | *.meta
66 | *.obj
67 | *.iobj
68 | *.pch
69 | *.pdb
70 | *.ipdb
71 | *.pgc
72 | *.pgd
73 | *.rsp
74 | *.sbr
75 | *.tlb
76 | *.tli
77 | *.tlh
78 | *.tmp
79 | *.tmp_proj
80 | *.log
81 | *.vspscc
82 | *.vssscc
83 | .builds
84 | *.pidb
85 | *.svclog
86 | *.scc
87 |
88 | # Chutzpah Test files
89 | _Chutzpah*
90 |
91 | # Visual C++ cache files
92 | ipch/
93 | *.aps
94 | *.ncb
95 | *.opendb
96 | *.opensdf
97 | *.sdf
98 | *.cachefile
99 | *.VC.db
100 | *.VC.VC.opendb
101 |
102 | # Visual Studio profiler
103 | *.psess
104 | *.vsp
105 | *.vspx
106 | *.sap
107 |
108 | # Visual Studio Trace Files
109 | *.e2e
110 |
111 | # TFS 2012 Local Workspace
112 | $tf/
113 |
114 | # Guidance Automation Toolkit
115 | *.gpState
116 |
117 | # ReSharper is a .NET coding add-in
118 | _ReSharper*/
119 | *.[Rr]e[Ss]harper
120 | *.DotSettings.user
121 |
122 | # JustCode is a .NET coding add-in
123 | .JustCode
124 |
125 | # TeamCity is a build add-in
126 | _TeamCity*
127 |
128 | # DotCover is a Code Coverage Tool
129 | *.dotCover
130 |
131 | # AxoCover is a Code Coverage Tool
132 | .axoCover/*
133 | !.axoCover/settings.json
134 |
135 | # Visual Studio code coverage results
136 | *.coverage
137 | *.coveragexml
138 |
139 | # NCrunch
140 | _NCrunch_*
141 | .*crunch*.local.xml
142 | nCrunchTemp_*
143 |
144 | # MightyMoose
145 | *.mm.*
146 | AutoTest.Net/
147 |
148 | # Web workbench (sass)
149 | .sass-cache/
150 |
151 | # Installshield output folder
152 | [Ee]xpress/
153 |
154 | # DocProject is a documentation generator add-in
155 | DocProject/buildhelp/
156 | DocProject/Help/*.HxT
157 | DocProject/Help/*.HxC
158 | DocProject/Help/*.hhc
159 | DocProject/Help/*.hhk
160 | DocProject/Help/*.hhp
161 | DocProject/Help/Html2
162 | DocProject/Help/html
163 |
164 | # Click-Once directory
165 | publish/
166 |
167 | # Publish Web Output
168 | *.[Pp]ublish.xml
169 | *.azurePubxml
170 | # Note: Comment the next line if you want to checkin your web deploy settings,
171 | # but database connection strings (with potential passwords) will be unencrypted
172 | *.pubxml
173 | *.publishproj
174 |
175 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
176 | # checkin your Azure Web App publish settings, but sensitive information contained
177 | # in these scripts will be unencrypted
178 | PublishScripts/
179 |
180 | # NuGet Packages
181 | *.nupkg
182 | # The packages folder can be ignored because of Package Restore
183 | **/[Pp]ackages/*
184 | # except build/, which is used as an MSBuild target.
185 | !**/[Pp]ackages/build/
186 | # Uncomment if necessary however generally it will be regenerated when needed
187 | #!**/[Pp]ackages/repositories.config
188 | # NuGet v3's project.json files produces more ignorable files
189 | *.nuget.props
190 | *.nuget.targets
191 |
192 | # Microsoft Azure Build Output
193 | csx/
194 | *.build.csdef
195 |
196 | # Microsoft Azure Emulator
197 | ecf/
198 | rcf/
199 |
200 | # Windows Store app package directories and files
201 | AppPackages/
202 | BundleArtifacts/
203 | Package.StoreAssociation.xml
204 | _pkginfo.txt
205 | *.appx
206 |
207 | # Visual Studio cache files
208 | # files ending in .cache can be ignored
209 | *.[Cc]ache
210 | # but keep track of directories ending in .cache
211 | !*.[Cc]ache/
212 |
213 | # Others
214 | ClientBin/
215 | ~$*
216 | *~
217 | *.dbmdl
218 | *.dbproj.schemaview
219 | *.jfm
220 | *.pfx
221 | *.publishsettings
222 | orleans.codegen.cs
223 |
224 | # Including strong name files can present a security risk
225 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
226 | #*.snk
227 |
228 | # Since there are multiple workflows, uncomment next line to ignore bower_components
229 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
230 | #bower_components/
231 |
232 | # RIA/Silverlight projects
233 | Generated_Code/
234 |
235 | # Backup & report files from converting an old project file
236 | # to a newer Visual Studio version. Backup files are not needed,
237 | # because we have git ;-)
238 | _UpgradeReport_Files/
239 | Backup*/
240 | UpgradeLog*.XML
241 | UpgradeLog*.htm
242 | ServiceFabricBackup/
243 | *.rptproj.bak
244 |
245 | # SQL Server files
246 | *.mdf
247 | *.ldf
248 | *.ndf
249 |
250 | # Business Intelligence projects
251 | *.rdl.data
252 | *.bim.layout
253 | *.bim_*.settings
254 | *.rptproj.rsuser
255 |
256 | # Microsoft Fakes
257 | FakesAssemblies/
258 |
259 | # GhostDoc plugin setting file
260 | *.GhostDoc.xml
261 |
262 | # Node.js Tools for Visual Studio
263 | .ntvs_analysis.dat
264 | node_modules/
265 |
266 | # Visual Studio 6 build log
267 | *.plg
268 |
269 | # Visual Studio 6 workspace options file
270 | *.opt
271 |
272 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
273 | *.vbw
274 |
275 | # Visual Studio LightSwitch build output
276 | **/*.HTMLClient/GeneratedArtifacts
277 | **/*.DesktopClient/GeneratedArtifacts
278 | **/*.DesktopClient/ModelManifest.xml
279 | **/*.Server/GeneratedArtifacts
280 | **/*.Server/ModelManifest.xml
281 | _Pvt_Extensions
282 |
283 | # Paket dependency manager
284 | .paket/paket.exe
285 | paket-files/
286 |
287 | # FAKE - F# Make
288 | .fake/
289 |
290 | # JetBrains Rider
291 | .idea/
292 | *.sln.iml
293 |
294 | # CodeRush
295 | .cr/
296 |
297 | # Python Tools for Visual Studio (PTVS)
298 | __pycache__/
299 | *.pyc
300 |
301 | # Cake - Uncomment if you are using it
302 | # tools/**
303 | # !tools/packages.config
304 |
305 | # Tabs Studio
306 | *.tss
307 |
308 | # Telerik's JustMock configuration file
309 | *.jmconfig
310 |
311 | # BizTalk build output
312 | *.btp.cs
313 | *.btm.cs
314 | *.odx.cs
315 | *.xsd.cs
316 |
317 | # OpenCover UI analysis results
318 | OpenCover/
319 |
320 | # Azure Stream Analytics local run output
321 | ASALocalRun/
322 |
323 | # MSBuild Binary and Structured Log
324 | *.binlog
325 |
326 | # NVidia Nsight GPU debugger configuration file
327 | *.nvuser
328 |
329 | # MFractors (Xamarin productivity tool) working folder
330 | .mfractor/
331 |
332 | Notebooks/.ipynb_checkpoints/template-demo-checkpoint.ipynb
333 |
334 | # Mac file system
335 | .DS_Store
336 |
337 | # Jupyter notebook
338 | .ipynb_checkpoints/
339 |
340 | # IDE
341 | .idea
--------------------------------------------------------------------------------
/APIDeliverables.md:
--------------------------------------------------------------------------------
1 | # AI for Earth - API Deliverables
2 |
3 | Select AI for Earth grant recipients are contributing AI for Earth APIs. If you are providing an API, here are the specific deliverables to submit:
4 |
5 | + [Container with machine learning model](#container)
6 | + [Jupyter notebook (for demo suite)](#notebook)
7 | + [Documentation](#doc)
8 | + [Assets for website](#assets)
9 |
10 | The container should be uploaded to a container registry (more details below). The Jupyter notebook, documentation, and website assets should be submitted via pull request to a private git repo at https://aiforearth.visualstudio.com/_git/AIforEarth-API-Push.
11 |
12 | Prior to submitting your work, you will need to contact us with the email address of the individual who will own uploading to the git repository above and to the container registry, so that we can grant the appropriate permissions. Please email aiforearthcommunity@microsoft.com with the subject line = "API push request" and include that person's email address in the body of your email.
13 |
14 | If you have questions, please contact aiforearthcommunity@microsoft.com.
15 |
16 |
17 | ## Container with machine learning model
18 | The actual delivery of your API can be done via a Docker container.
19 | + Please follow the directions [here](./Quickstart.md) to create the Docker container.
20 | + In step 8, when you build your Docker image, please tag it using the tag:
21 | “ai4egrantee.azurecr.io//-:”
22 | + Replace steps 10-11 with publishing to our AI for Earth container registry. Use the following commands (a filled-in example with hypothetical names follows this list):
23 | ```
24 | docker login --username --password ai4egrantee.azurecr.io
25 | docker push ai4egrantee.azurecr.io//-:
26 | ```
27 | + Please send an email to aiforearthcommunity@microsoft.com with the subject line = "API push request" and the body of your email containing the email address of the person who will push the container (so we can grant that email address the appropriate permissions to push to our container registry).
28 | + In terms of testing, please ensure that your code meets the defined [acceptance criteria](./AcceptanceCriteria.md).
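For concreteness, here is a sketch of the build, login, and push sequence. The original instructions use placeholders for the image name segments, so every name and version below is an illustrative guess, not the required pattern; substitute your own values and the registry credentials you were given.
```
# Hypothetical grantee "contoso" publishing version 1.0 of an API called "species-classifier".
docker build . -t ai4egrantee.azurecr.io/contoso/contoso-species-classifier:1.0
docker login --username <your-username> --password <your-password> ai4egrantee.azurecr.io
docker push ai4egrantee.azurecr.io/contoso/contoso-species-classifier:1.0
```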
29 |
30 | **Alternate option:** Grantees may either provide a container that meets the acceptance criteria in full, or provide their source code together with a semi-functional container under relaxed acceptance criteria.
31 |
32 |
33 | ## Jupyter Notebook
34 | We are compiling a suite of demos to showcase the work of our AI for Earth grant recipients. These demos are intended for an audience of developers and data scientists, so they can see how to call your API and the type of results that your machine learning model returns. Please include sample data for calling your API that can be shown publicly.
35 | + Please follow the directions [here](./JupyterNotebook.md) to create a Jupyter notebook that can be used to demonstrate your amazing work.
36 | + We have also provided a [template notebook](./Notebooks/template-demo.ipynb) that you can start from.
37 |
38 |
39 | ## Documentation
40 | Of course, every good API needs documentation to show its usage. Please include any assumptions that your code makes (for example, all input images must be square tiles) and an example of how to call the API.
41 | + Please provide documentation of your API using the [OpenAPI specification](https://swagger.io/specification/), as a .json file.
42 | + We recommend that you build and validate it using the [Swagger Editor](https://editor.swagger.io/). You can start with the example that they provide or with [our landcover mapping documentation](./Documentation/landcover_api_spec_swagger.0.1.json) as an example.
43 | + For an example of useful information to include, see how the final product is rendered on the website (click on the version numbers): https://aka.ms/aieapisdoc
44 |
45 | Additional resources that may be useful:
46 | + This is the process that we will follow to import your API: https://docs.microsoft.com/en-us/azure/api-management/import-and-publish#a-namecreate-api-aimport-and-publish-a-backend-api
47 | + This link documents the API import restrictions and known issues for OpenAPI/Swagger: https://docs.microsoft.com/en-us/azure/api-management/api-management-api-import-restrictions
48 | + Important information and tips related to OpenAPI import: https://blogs.msdn.microsoft.com/apimanagement/2018/04/11/important-changes-to-openapi-import-and-export/
49 |
50 |
51 | ## Assets for website
52 | These assets could potentially be used on the AI for Earth website to highlight your API. For an example, see https://aka.ms/AI4EAPI.
53 |
54 | Please provide the following:
55 | + Image (high-resolution; we will crop to the right size)
56 | + Three-line summary of API (300 characters maximum)
57 | + Link (the destination for the “Learn about X” link)
58 |
--------------------------------------------------------------------------------
/AcceptanceCriteria.md:
--------------------------------------------------------------------------------
1 | # AI for Earth Hosted Acceptance Criteria
2 | AI for Earth Grantees have the option of hosting their completed APIs on the official AI for Earth API platform. These APIs are subject to the AI for Earth acceptance criteria. Each of the following sections identifies a requirement that must be met before the API is migrated to the AI for Earth hosting platform.
3 |
4 | ## Contents
5 | 1. [API Design](#API-Design)
6 | 2. [Testing](#Testing)
7 | 3. [Documentation](#Documentation)
8 | 4. [Kubernetes](#Kubernetes)
9 | 5. [Reliability](#Reliability)
10 | 6. [Devops](#Devops)
11 | 7. [Telemetry](#Telemetry)
12 | 8. [Publishing to the AI for Earth API Platform](#Publishing-to-the-AI-for-Earth-API-Platform)
13 |
14 | ## API Design
15 |
16 | ### Input validation
17 | Validation of API input must be performed prior to any processing. This ensures fail-fast behavior, reduces unnecessary resource utilization, and provides immediate feedback to the caller.
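As an illustrative sketch only (the route and field names are hypothetical, not part of any specific API), fail-fast validation in a Flask handler might look like:
```python
from flask import Flask, abort, request

app = Flask(__name__)

@app.route('/v1/my_api/classify', methods=['POST'])
def classify():
    body = request.get_json(silent=True)
    # Fail fast: reject malformed input before any model work is done.
    if body is None or 'image_url' not in body:
        abort(400, 'image_url is required.')
    # ... proceed to processing only after validation passes ...
    return 'OK'
```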
18 |
19 | ### Fast response
20 | For long-running/async APIs, a task id must be immediately returned to the caller. Ensure that threading or parallel processing is utilized.
21 |
22 | For synchronous APIs, a result must be returned in less than 5 seconds. If not, the API shall be converted to an async API and must utilize the task manager.
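The base-py tools in this repository implement this pattern. Below is a minimal sketch that assumes the base-py image (its `ai4e_service` module on `PYTHONPATH` and the `API_PREFIX` environment variable set); the endpoint path and the body of `run_model` are hypothetical.
```python
from flask import Flask
from ai4e_app_insights_wrapper import AI4EAppInsights
from ai4e_service import APIService

app = Flask(__name__)
service = APIService(app, AI4EAppInsights())

# The decorator returns a task id to the caller immediately and runs the
# function on a background thread; callers poll the task endpoint that
# APIService registers under API_PREFIX.
@service.api_async_func(api_path='/classify', methods=['POST'],
                        maximum_concurrent_requests=2, trace_name='post:classify')
def run_model(*args, **kwargs):
    task_id = kwargs['taskId']
    # ... long-running model work goes here; report progress or failure
    # for task_id through service.api_task_manager ...
    return
```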
23 |
24 | ### Stateless or distributed state
25 | All APIs must maintain a stateless design among requests. If state is required between requests, a distributed state system must be implemented. This state system must be atomic and lock-free in nature and must be able to handle loads associated with the API.
26 |
27 | ## Testing
28 | APIs are to be tested in the following categories and, when applicable, results submitted with the request for acceptance.
29 |
30 | ### Functional
31 | - Test functionality against requirements and API design specification
32 | - Test typical use cases
33 | - Test edge cases, including any possible out-of-bounds input
34 | - Test very large inputs and very small inputs
35 | - Test empty input
36 |
37 | ### Bad requests
38 | - Test unsupported REST methods
39 | - Test bad input
40 | - Test permission issues for SAS Blobs, etc.
41 |
42 | ### Performance
43 | - Test for timeouts when handling large inputs
44 | - Gather metrics relating to running on GPU vs CPU
45 | - Ensure that all failure points occur early in execution (fail-fast)
46 | - Tune based on performance testing
47 |
48 | ### Load
49 | - Gather metrics for incremental loads
50 | - Identify maximum load for a single instance
51 |
52 | ## Documentation
53 | Several areas of the API require documentation. The documentation must be versioned with the API.
54 |
55 | ### Functional
56 | - Document the purpose, use cases, and end-to-end scenarios
57 | - Document relationship between input and output
58 | - Document typical usage and edge case usage
59 |
60 | ### API Swagger
61 | - The API must be Swagger documented, which includes input, output, schemas, etc.
62 | - Swagger documentation must include sample inputs.
63 |
64 | ### Example input
65 | - If the API requires input other than JSON, example input (files, etc.) must be provided.
66 | - Example input must cover all computation possibilities.
67 |
68 | ### Usage instructions
69 | Step-by-step usage instructions must be provided, which shall include:
70 | - How to generate input data
71 | - How to utilize output
72 | - How to interpret statuses
73 | - How to interpret error conditions
74 | - How to fix errors
75 |
76 | #### Integration
77 | Document all integration points with external sources:
78 | - How to generate input with external tools, software, etc.
79 | - How to utilize output with external tools, software, etc.
80 | - How to utilize other APIs or software to create end-to-end capabilities
81 |
82 | ## Kubernetes
83 | We host APIs in a custom Kubernetes cluster. To ensure API availability and dynamic scaling, information needs to be provided for the following criteria.
84 |
85 | ### Resource consumption targets
86 | - Typical CPU usage for a single request
87 | - Maximum CPU usage for a single request
88 | - GPU execution time
89 | - CPU execution time
90 | - Typical memory usage for a single request
91 | - Maximum memory usage for a single request
92 |
93 | ### Docker reports health status
94 | - The API must expose an endpoint that returns a health check, and a corresponding `HEALTHCHECK` must be added to the Dockerfile, for example:
95 | ```Dockerfile
96 | HEALTHCHECK --interval=1m --timeout=3s --start-period=20s \
97 | CMD curl -f http://localhost/ || exit 1
98 | ```
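For reference, the base-py `APIService` in this repository already registers a health route at `API_PREFIX + '/'`. For a bare Flask app, a minimal endpoint to back the `HEALTHCHECK` above might look like this sketch:
```python
from flask import Flask

app = Flask(__name__)

@app.route('/')
def health_check():
    # Keep this cheap: the Docker HEALTHCHECK above polls it every minute.
    return 'Health check OK', 200
```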
99 |
100 | ### Scaling targets
101 | Based on the load of a single request (typical and largest), estimate the resource thresholds that indicate when the service must be scaled up or down.
102 |
103 | ## Reliability
104 |
105 | ### Non-recycling
106 | A best effort shall be made to prevent an instance from recycling continuously.
107 |
108 | ### Fast recovery
109 | Stagger the loading of large objects into memory such that an instance can quickly respond to requests upon startup.
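One common way to do this (a sketch only; `load_model` stands in for a hypothetical, expensive initializer) is to defer heavy loads until the first request that needs them, so the instance can answer health checks while it is still warming up:
```python
_model = None

def get_model():
    # Load the large model on first use rather than at import time, so the
    # instance responds to health checks immediately after startup.
    global _model
    if _model is None:
        _model = load_model()  # hypothetical expensive, one-time load
    return _model
```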
110 |
111 | ## Devops
112 | Since AI for Earth will be assuming initial DevOps, detailed instructions shall be provided for the following categories.
113 |
114 | ### Debugging instructions
115 | Document all known possible failure cases and how to resolve them. Document these in the form of a playbook, where each case is identified and step-by-step directions, including code, are provided.
116 |
117 | ### Test sample data
118 | Provide several datasets that can be used for acceptance testing, load testing, stress testing, and functional testing.
119 |
120 | ### Setup/deployment instructions/requirements
121 | Any custom setup instructions, along with required architectural components, must be clearly documented.
122 |
123 | ### Contact information (devops)
124 | Provide at least three contacts for additional DevOps support. This shall include a phone number, email address, Slack/Teams channel, etc.
125 |
126 | ## Usage permissions
127 | Clearly identify any restrictions for API usage. This includes denoting any sensitive issues.
128 |
129 | ### API key distribution requirements
130 | The following questions shall be answered:
131 | - What restrictions exist for API usage?
132 | - Who makes decisions on key approvals? Provide contact information.
133 |
134 | ## Telemetry
135 | AI for Earth collects telemetry from our back-end system in order to provide a reliable service. No PII (personally identifiable information) is collected by AI for Earth.
136 |
137 | ### State/heartbeat reporting
138 | The API shall include an endpoint that reports the state of the service.
139 |
140 | ### Performance
141 | Performance metrics, along with trace logging, must be included. The trace logging shall include execution time. To aid in identifying performance requirements, details such as input size should be included with the trace log.
142 |
143 | ### External sink
144 | Telemetry is collected by AI for Earth for our back-end system. Additional logging can be sent to the API owner's Application Insights instance. Provide your Application Insights connection information so that logs can be forwarded to it.
145 |
146 | ### No PII
147 | Absolutely no PII (personally identifiable information) shall be collected by the API.
148 |
149 | ### Alerting
150 | Identify alert conditions based on the collected telemetry. Include the response to such alerts in the DevOps playbook.
151 |
152 | ## Publishing to the AI for Earth API Platform
153 | An AI for Earth engineer must perform the publishing to the platform, but before they can do so, your image must be made available to the AI for Earth team. Please follow these steps to complete this process.
154 |
155 | 1. [Create an Azure Container Registry](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-azure-cli) in your Azure subscription.
156 | 2. [Tag and push](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-azure-cli#push-image-to-registry) your image to the repository. The images must be versioned. Internally, we use the following naming pattern:
157 | ```
158 | .azurecr.io//-:
159 | ```
160 | 3. Contact the AI for Earth Engineering Team to obtain the AI for Earth publisher [Azure Active Directory application name](https://docs.microsoft.com/en-us/azure/active-directory/develop/app-objects-and-service-principals). This application will be used to deploy your API image to the AI for Earth API Platform.
161 | 4. [Grant 'AcrPull' role access](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-authentication#service-principal) to the Azure AD application from step 3 (a CLI sketch follows this list).
162 | 5. Ensure that your API's documentation is up-to-date and the API has been fully tested and verified.
163 | 6. Notify the AI for Earth Engineering Team of your intention to deploy to production.
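As a sketch of step 4: the registry name and application ID below are placeholders (the actual application name comes from the AI for Earth team in step 3), and this is one possible way to grant the role using the Azure CLI.
```
az role assignment create \
  --assignee "<publisher-app-id-from-step-3>" \
  --role AcrPull \
  --scope $(az acr show --name <your-registry> --query id --output tsv)
```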
--------------------------------------------------------------------------------
/CODEOFCONDUCT.md:
--------------------------------------------------------------------------------
1 | Microsoft Open Source Code of Conduct
2 |
3 | This code of conduct outlines expectations for participation in Microsoft-managed open source communities, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all. People violating this code of conduct may be banned from the community.
4 |
5 | Our open source communities strive to:
6 |
7 | Be friendly and patient: Remember you might not be communicating in someone else's primary spoken or programming language, and others may not have your level of understanding.
8 | Be welcoming: Our communities welcome and support people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, color, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability.
9 | Be respectful: We are a world-wide community of professionals, and we conduct ourselves professionally. Disagreement is no excuse for poor behavior and poor manners. Disrespectful and unacceptable behavior includes, but is not limited to:
10 | Violent threats or language.
11 | Discriminatory or derogatory jokes and language.
12 | Posting sexually explicit or violent material.
13 | Posting, or threatening to post, people's personally identifying information ("doxing").
14 | Insults, especially those using discriminatory terms or slurs.
15 | Behavior that could be perceived as sexual attention.
16 | Advocating for or encouraging any of the above behaviors.
17 | Understand disagreements: Disagreements, both social and technical, are useful learning opportunities. Seek to understand the other viewpoints and resolve differences constructively.
18 | This code is not exhaustive or complete. It serves to capture our common understanding of a productive, collaborative environment. We expect the code to be followed in spirit as much as in the letter.
19 |
20 | Scope
21 |
22 | This code of conduct applies to all repos and communities for Microsoft-managed open source projects regardless of whether or not the repo explicitly calls out its use of this code. The code also applies in public spaces when an individual is representing a project or its community. Examples include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
23 |
24 | Note: Some Microsoft-managed communities have codes of conduct that pre-date this document and issue resolution process. While communities are not required to change their code, they are expected to use the resolution process outlined here. The review team will coordinate with the communities involved to address your concerns.
25 | Reporting Code of Conduct Issues
26 |
27 | We encourage all communities to resolve issues on their own whenever possible. This builds a broader and deeper understanding and ultimately a healthier interaction. In the event that an issue cannot be resolved locally, please feel free to report your concerns by contacting opencode@microsoft.com. Your report will be handled in accordance with the issue resolution process described in the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/).
28 |
29 | In your report please include:
30 |
31 | - Your contact information.
32 | - Names (real, usernames or pseudonyms) of any individuals involved. If there are additional witnesses, please include them as well.
33 | - Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public chat log), please include a link or attachment.
34 | - Any additional information that may be helpful.
35 |
36 | All reports will be reviewed by a multi-person team and will result in a response that is deemed necessary and appropriate to the circumstances. Where additional perspectives are needed, the team may seek insight from others with relevant expertise or experience. The confidentiality of the person reporting the incident will be kept at all times. Involved parties are never part of the review team.
37 |
38 | Anyone asked to stop unacceptable behavior is expected to comply immediately. If an individual engages in unacceptable behavior, the review team may take any action they deem appropriate, including a permanent ban from the community.
39 |
40 | This code of conduct is based on the [template](http://todogroup.org/opencodeofconduct) established by the TODO Group and used by numerous other large communities (e.g., Facebook, Yahoo, Twitter, GitHub) and the Scope section from the Contributor Covenant version 1.4.
41 |
--------------------------------------------------------------------------------
/Containers/base-py/Dockerfile:
--------------------------------------------------------------------------------
1 | #mcr.microsoft.com/aiforearth/base-py:version
2 | ARG BASE_IMAGE=nvidia/cuda:9.2-runtime-ubuntu16.04
3 | FROM $BASE_IMAGE
4 |
5 | ENV PATH /usr/local/envs/ai4e_py_api/bin:$PATH
6 | ENV PYTHONPATH="${PYTHONPATH}:/ai4e_api_tools"
7 |
8 | ENV LANG=C.UTF-8
9 | ENV LC_ALL=C.UTF-8
10 |
11 | RUN apt-get update --fix-missing \
12 | && apt-get install -y software-properties-common \
13 | && add-apt-repository ppa:deadsnakes/ppa \
14 | && apt-get update \
15 | && apt-get install -y apt-utils \
16 | supervisor \
17 | curl \
18 | bzip2 \
19 | ca-certificates \
20 | libglib2.0-0 \
21 | libxext6 \
22 | libsm6 \
23 | libxrender1 \
24 | python3-setuptools \
25 | python3.7-dev \
26 | python3.7 \
27 | apt-transport-https \
28 | build-essential \
29 | gcc
30 |
31 | RUN apt-get update && apt-get install python3-pip -y
32 | RUN python3.7 -m pip install pip --upgrade
33 |
34 | COPY ./base-py/requirements.txt /ai4e_api_tools/
35 | WORKDIR /ai4e_api_tools
36 | RUN pip3 install -r ./requirements.txt
37 |
38 | # Install Azure Blob SDK
39 | RUN pip3 install azure-mgmt-storage \
40 | azure-storage-blob \
41 | azure-identity
42 |
43 | # Install Application Insights Opencensus packages
44 | RUN pip3 install opencensus-ext-azure \
45 | opencensus-ext-flask
46 |
47 | COPY ./base-py/ai4e_api_tools /ai4e_api_tools/
48 | COPY ./common/sas_blob.py /ai4e_api_tools/
49 | COPY ./common/aad_blob.py /ai4e_api_tools/
50 |
--------------------------------------------------------------------------------
/Containers/base-py/README.md:
--------------------------------------------------------------------------------
1 | ## Create a custom Python base image with a different CUDA version
2 |
3 | The `base-py` image hosted by the AI for Earth team on the mcr.microsoft.com registry has CUDA 9.2 installed, so any images you create based on it will have this CUDA version. You are welcome to build your own base image with another version of CUDA, in the following way.
4 |
5 | 1. Navigate to `AIforEarth-API-Development/Containers` (one level above this directory).
6 |
7 | 2. Run `docker build` with a custom base image that has your desired CUDA version. Specify this base image via the `BASE_IMAGE` argument (in this example, `nvidia/cuda:9.0-cudnn7-runtime-ubuntu16.04` is used as the base image):
8 | ```
9 | docker build . -f base-py/Dockerfile --build-arg BASE_IMAGE=nvidia/cuda:9.0-cudnn7-runtime-ubuntu16.04 -t .azurecr.io/aiforearth/base-py-cuda-90:1.2
10 | ```
11 |
12 | 3. You can now specify the resulting image as your base image in your API's `Dockerfile`.
13 |
14 | 4. Push your base image to your registry so you can reuse it for other projects.
15 |
--------------------------------------------------------------------------------
/Containers/base-py/ai4e_api_tools/ai4e_app_insights.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | # This is a wrapper on top of the normal Application Insights Python SDKs.
4 | # The only addition is an AI4E instrumentation key that only gets added
5 | # when the service is hosted by Microsoft in our AI for Earth API system.
6 |
7 | from os import getenv
8 | import socket
9 |
10 | from applicationinsights import TelemetryClient
11 | from applicationinsights.channel import AsynchronousSender
12 | from applicationinsights.channel import AsynchronousQueue
13 | from applicationinsights.channel import TelemetryChannel
14 | from applicationinsights.logging import LoggingHandler
15 | from applicationinsights.requests import WSGIApplication
16 |
17 | from ai4e_app_insights_context import AI4ETelemetryContext
18 |
19 |
20 | CONF_PREFIX = "APPINSIGHTS"
21 |
22 | APPINSIGHTS_INSTRUMENTATIONKEY = CONF_PREFIX + "_INSTRUMENTATIONKEY"
23 | CONF_ENDPOINT_URI = CONF_PREFIX + "_ENDPOINT_URI"
24 | CONF_DISABLE_REQUEST_LOGGING = CONF_PREFIX + "_DISABLE_REQUEST_LOGGING"
25 | CONF_DISABLE_TRACE_LOGGING = CONF_PREFIX + "_DISABLE_TRACE_LOGGING"
26 | CONF_DISABLE_EXCEPTION_LOGGING = CONF_PREFIX + "_DISABLE_EXCEPTION_LOGGING"
27 |
28 |
29 | class AppInsights(object):
30 | """ This class represents a Flask extension that enables request telemetry,
31 | trace logging and exception logging for a Flask application. The telemetry
32 | will be sent to Application Insights service using the supplied
33 | instrumentation key.
34 |
35 | The following Flask config variables can be used to configure the extension:
36 |
37 | - Set ``APPINSIGHTS_INSTRUMENTATIONKEY`` to a string to provide the
38 | instrumentation key to send telemetry to application insights.
39 | Alternatively, this value can also be provided via an environment variable
40 | of the same name.
41 |
42 | - Set ``APPINSIGHTS_ENDPOINT_URI`` to a string to customize the telemetry
43 | endpoint to which Application Insights will send the telemetry.
44 |
45 | - Set ``APPINSIGHTS_DISABLE_REQUEST_LOGGING`` to ``False`` to disable
46 | logging of Flask requests to Application Insights.
47 |
48 | - Set ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` to ``False`` to disable logging
49 | of all log traces to Application Insights.
50 |
51 | - Set ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING`` to ``False`` to disable
52 | logging of all exceptions to Application Insights.
53 |
54 | .. code:: python
55 |
56 | from flask import Flask
57 | from ai4e_app_insights import AppInsights
58 |
59 | # instantiate the Flask application
60 | app = Flask(__name__)
61 | app.config['APPINSIGHTS_INSTRUMENTATIONKEY'] = ''
62 |
63 | # log requests, traces and exceptions to the Application Insights service
64 | appinsights = AppInsights(app)
65 |
66 | # define a simple route
67 | @app.route('/')
68 | def hello_world():
69 | return 'Hello World!'
70 |
71 | # run the application
72 | if __name__ == '__main__':
73 | app.run()
74 | """
75 | def __init__(self, app=None, context=None):
76 | """
77 | Initialize a new instance of the extension.
78 |
79 | Args:
80 | app (flask.Flask). the Flask application for which to initialize the extension.
81 | """
82 | socket.setdefaulttimeout(30)
83 | self._appinsights_key = None
84 | self._endpoint_uri = None
85 | self._channel = None
86 | self._requests_middleware = None
87 | self._trace_log_handler_grantee = None
88 | self._trace_log_handler_ai4e = None
89 | self._exception_telemetry_client_grantee = None
90 | self._exception_telemetry_client_ai4e = None
91 |
92 | if app:
93 | self.init_app(app, context)
94 |
95 | def init_app(self, app, context):
96 | """
97 | Initializes the extension for the provided Flask application.
98 |
99 | Args:
100 | app (flask.Flask). the Flask application for which to initialize the extension.
101 | """
102 | print("Starting application insights module.")
103 | self._appinsights_key = app.config.get(APPINSIGHTS_INSTRUMENTATIONKEY) or getenv(APPINSIGHTS_INSTRUMENTATIONKEY)
104 |
105 | if (self._appinsights_key and len(self._appinsights_key.strip()) > 0):
106 | self._appinsights_key = self._appinsights_key.strip()
107 | else:
108 | self._appinsights_key = None
109 |
110 | # Set the application insights key for production use.
111 | if self._appinsights_key:
112 | print("Application insights key set.")
113 |
114 | self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
115 |
116 | if self._endpoint_uri:
117 | sender = AsynchronousSender(self._endpoint_uri)
118 | else:
119 | sender = AsynchronousSender()
120 |
121 | queue = AsynchronousQueue(sender)
122 |
123 | if not context:
124 | context = AI4ETelemetryContext()
125 |
126 | self._channel = TelemetryChannel(context, queue)
127 |
128 | self._init_request_logging(app)
129 | self._init_trace_logging(app)
130 | self._init_exception_logging(app)
131 |
132 | def _init_request_logging(self, app):
133 | """
134 | Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``
135 | is set in the Flask config.
136 |
137 | Args:
138 | app (flask.Flask). the Flask application for which to initialize the extension.
139 | """
140 | enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)
141 |
142 | if not enabled:
143 | return
144 |
145 | wsgi_key = self._appinsights_key
146 |
147 | self._requests_middleware = WSGIApplication(
148 | wsgi_key, app.wsgi_app, telemetry_channel=self._channel)
149 |
150 | app.wsgi_app = self._requests_middleware
151 |
152 | def _init_trace_logging(self, app):
153 | """
154 | Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
155 | set in the Flask config.
156 |
157 | Args:
158 | app (flask.Flask). the Flask application for which to initialize the extension.
159 | """
160 | enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)
161 |
162 | if not enabled:
163 | return
164 |
165 | if self._appinsights_key:
166 | self._trace_log_handler_grantee = LoggingHandler(
167 | self._appinsights_key, telemetry_channel=self._channel)
168 |
169 | app.logger.addHandler(self._trace_log_handler_grantee)
170 |
171 | def _init_exception_logging(self, app):
172 | """
173 | Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
174 | is set in the Flask config.
175 |
176 | Args:
177 | app (flask.Flask). the Flask application for which to initialize the extension.
178 | """
179 | enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
180 |
181 | if not enabled:
182 | return
183 |
184 | if self._appinsights_key:
185 | self._exception_telemetry_client_grantee = TelemetryClient(
186 | self._appinsights_key, telemetry_channel=self._channel)
187 |
188 | @app.errorhandler(Exception)
189 | def exception_handler(exception):
190 | try:
191 | raise exception
192 | except Exception:
193 | if self._exception_telemetry_client_grantee:
194 | self._exception_telemetry_client_grantee.track_exception()
195 |
196 | if self._exception_telemetry_client_ai4e:
197 | self._exception_telemetry_client_ai4e.track_exception()
198 | finally:
199 | raise exception
200 |
201 | def flush(self):
202 | """Flushes the queued up telemetry to the service.
203 | """
204 | print("trying all flush")
205 | if self._requests_middleware:
206 | self._requests_middleware.flush()
207 |
208 | if self._trace_log_handler_grantee:
209 | self._trace_log_handler_grantee.flush()
210 |
211 | if self._trace_log_handler_ai4e:
212 | print("Trying trace flush...")
213 | self._trace_log_handler_ai4e.flush()
214 | print("Trace flush finsihed.")
215 |
216 | if self._exception_telemetry_client_grantee:
217 | self._exception_telemetry_client_grantee.flush()
218 |
219 | if self._exception_telemetry_client_ai4e:
220 | self._exception_telemetry_client_ai4e.flush()
221 |
--------------------------------------------------------------------------------
/Containers/base-py/ai4e_api_tools/ai4e_app_insights_context.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | import platform
4 | import locale
5 | from os import getenv
6 | from applicationinsights.channel import contracts
7 |
8 | CONF_SERVICE_OWNER = "SERVICE_OWNER"
9 | CONF_SERVICE_NAME = "SERVICE_NAME"
10 | CONF_SERVICE_VERSION = "SERVICE_VERSION"
11 | CONF_SERVICE_CLUSTER = "SERVICE_CLUSTER"
12 | CONF_SERVICE_MODEL_NAME = "SERVICE_MODEL_NAME"
13 | CONF_SERVICE_MODEL_FRAMEWORK = "SERVICE_MODEL_FRAMEWORK"
14 | CONF_SERVICE_MODEL_FRAMEOWRK_VERSION = "SERVICE_MODEL_FRAMEOWRK_VERSION"
15 | CONF_SERVICE_MODEL_VERSION = "SERVICE_MODEL_VERSION"
16 | CONF_SERVICE_CONTAINER_VERSION = "SERVICE_CONTAINER_VERSION"
17 | CONF_SERVICE_CONTAINER_NAME = "SERVICE_CONTAINER_NAME"
18 |
19 | # save off whatever is currently there
20 | existing_device_initialize = contracts.Device._initialize
21 | def device_initialize(self):
22 | """ The device initializer used to assign special properties to all device context objects"""
23 | existing_device_initialize(self)
24 | self.type = 'Other'
25 | self.id = platform.node()
26 | self.os_version = platform.version()
27 | self.locale = locale.getdefaultlocale()[0]
28 |
29 | # assign the device context initializer
30 | contracts.Device._initialize = device_initialize
31 |
32 | class AI4ETelemetryContext(object):
33 | """Represents the context for sending telemetry to the Application Insights service.
34 | """
35 | def __init__(self):
36 | """Initializes a new instance of the class.
37 | """
38 | self.type = 'Service'
39 | self.platform_node = platform.node()
40 | self.platform_os_version = platform.version()
41 | self.platform_locale = locale.getdefaultlocale()[0]
42 | self.platform_processor = platform.processor()
43 | self.platform_python_version = platform.python_version()
44 |
45 | self.device = contracts.Device()
46 | self.cloud = contracts.Cloud()
47 | self.application = contracts.Application()
48 | self.user = contracts.User()
49 | self.session = contracts.Session()
50 | self.operation = contracts.Operation()
51 | self.location = contracts.Location()
52 |
53 | self.service_owner = getenv(CONF_SERVICE_OWNER)
54 | self.service_name = getenv(CONF_SERVICE_NAME)
55 | self.service_version = getenv(CONF_SERVICE_VERSION)
56 | self.service_cluster = getenv(CONF_SERVICE_CLUSTER)
57 | self.service_model_name = getenv(CONF_SERVICE_MODEL_NAME)
58 | self.service_model_framework = getenv(CONF_SERVICE_MODEL_FRAMEWORK)
59 | self.service_model_framework_version = getenv(CONF_SERVICE_MODEL_FRAMEOWRK_VERSION)
60 | self.service_model_version = getenv(CONF_SERVICE_MODEL_VERSION)
61 | self.service_container_version = getenv(CONF_SERVICE_CONTAINER_VERSION)
62 | self.service_container_name = getenv(CONF_SERVICE_CONTAINER_NAME)
63 |
64 | self._properties = {
65 | 'service_owner': getenv(CONF_SERVICE_OWNER),
66 | 'service_name': getenv(CONF_SERVICE_NAME),
67 | 'service_version': getenv(CONF_SERVICE_VERSION),
68 | 'service_cluster': getenv(CONF_SERVICE_CLUSTER),
69 | 'service_model_name': getenv(CONF_SERVICE_MODEL_NAME),
70 | 'service_model_framework': getenv(CONF_SERVICE_MODEL_FRAMEWORK),
71 | 'service_model_framework_version': getenv(CONF_SERVICE_MODEL_FRAMEOWRK_VERSION),
72 | 'service_model_version': getenv(CONF_SERVICE_MODEL_VERSION),
73 | 'service_container_version': getenv(CONF_SERVICE_CONTAINER_VERSION),
74 | 'service_container_name': getenv(CONF_SERVICE_CONTAINER_NAME)
75 | }
76 |
77 | @property
78 | def properties(self):
79 | """The property context. This contains free-form properties that you can add to your telemetry.
80 | Returns:
81 | (dict). the context object.
82 | """
83 | return self._properties
--------------------------------------------------------------------------------
/Containers/base-py/ai4e_api_tools/ai4e_app_insights_wrapper.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | # This library is used to wrap many of the AzureLogHandler and metrics_exporter
4 | # required objects into an easily usable package.
5 |
6 | from os import getenv
7 | import logging
8 | from opencensus.ext.azure import metrics_exporter
9 | from opencensus.ext.azure.log_exporter import AzureLogHandler
10 | from opencensus.stats import aggregation as aggregation_module
11 | from opencensus.stats import measure as measure_module
12 | from opencensus.tags import tag_map as tag_map_module
13 | from opencensus.stats import view as view_module
14 | from opencensus.stats import stats as stats_module
15 |
16 | APPINSIGHTS_INSTRUMENTATIONKEY = "APPINSIGHTS_INSTRUMENTATIONKEY"
17 |
18 | stats = stats_module.stats
19 | view_manager = stats.view_manager
20 | stats_recorder = stats.stats_recorder
21 |
22 | class AI4EAppInsights(object):
23 | def __init__(self):
24 | self.metrics = {}
25 | self.logger = logging.getLogger(__name__)
26 |
27 | self.appinsights_key = None
28 | raw_key = getenv(APPINSIGHTS_INSTRUMENTATIONKEY, None)
29 | if (raw_key and len(raw_key.strip()) > 0):
30 | self.appinsights_key = raw_key.strip()
31 |
32 | if (self.appinsights_key):
33 | handler = AzureLogHandler(connection_string='InstrumentationKey=' + str(getenv('APPINSIGHTS_INSTRUMENTATIONKEY')))
34 | self.logger.addHandler(handler)
35 | exporter = metrics_exporter.new_metrics_exporter(connection_string='InstrumentationKey=' + str(getenv('APPINSIGHTS_INSTRUMENTATIONKEY')))
36 | view_manager.register_exporter(exporter)
37 |
38 | def _log(self, message, sev, taskId = None, additionalProperties = None):
39 | if (taskId):
40 | if (additionalProperties is None):
41 | additionalProperties = { 'task_id': taskId }
42 | else:
43 | additionalProperties['task_id'] = taskId
44 |
45 | self.logger.log(sev, message, extra=additionalProperties)
46 |
47 | def log_debug(self, message, taskId = None, additionalProperties = None):
48 | self._log(message, 10, taskId, additionalProperties)
49 |
50 | def log_info(self, message, taskId = None, additionalProperties = None):
51 | self._log(message, 20, taskId, additionalProperties)
52 |
53 | def log_warn(self, message, taskId = None, additionalProperties = None):
54 | self._log(message, 30, taskId, additionalProperties)
55 |
56 | def log_error(self, message, taskId = None, additionalProperties = None):
57 | self._log(message, 40, taskId, additionalProperties)
58 |
59 | def log_exception(self, message, taskId = None, additionalProperties = None):
60 | self._log(message, 50, taskId, additionalProperties)
61 |
62 | def track_metric(self, metric_name, metric_value):
63 | if (self.appinsights_key):
64 | print("Tracking metric:" + metric_name + ", Value: " + str(metric_value))
65 |
66 | if (not metric_name in self.metrics):
67 | metrics_measure = measure_module.MeasureInt(metric_name, metric_name, metric_name)
68 | metrics_view = view_module.View(metric_name, metric_name, [], metrics_measure, aggregation_module.LastValueAggregation(value=metric_value))
69 |
70 | view_manager.register_view(metrics_view)
71 | mmap = stats_recorder.new_measurement_map()
72 | tmap = tag_map_module.TagMap()
73 |
74 | self.metrics[metric_name] = {'measure': metrics_measure, 'measurement_map': mmap, 'tag_map': tmap}
75 |
76 | measure = self.metrics[metric_name]['measure']
77 | mmap = self.metrics[metric_name]['measurement_map']
78 | tmap = self.metrics[metric_name]['tag_map']
79 | print("Putting metric:" + metric_name + ", Value: " + str(metric_value))
80 | mmap.measure_int_put(measure, metric_value)
81 | mmap.record(tmap)
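# Usage sketch (illustrative only): this shows how the wrapper is typically
# used, assuming the APPINSIGHTS_INSTRUMENTATIONKEY environment variable is
# set; without it, messages go only to the standard Python logger and no
# telemetry is exported.
#
#   from ai4e_app_insights_wrapper import AI4EAppInsights
#
#   log = AI4EAppInsights()
#   log.log_info('Model loaded.', taskId='42')
#   log.track_metric('inference_time_ms', 180)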
--------------------------------------------------------------------------------
/Containers/base-py/ai4e_api_tools/ai4e_service.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | from threading import Thread
4 | from os import getenv
5 | import json
6 | import traceback
7 |
8 | from flask import Flask, abort, request, current_app, views
9 | from flask_restful import Resource, Api
10 | import signal
11 | from task_management.api_task import TaskManager
12 | import sys
13 | from functools import wraps
14 | from werkzeug.exceptions import HTTPException
15 | from ai4e_app_insights_wrapper import AI4EAppInsights
16 |
17 | disable_request_metric = getenv('DISABLE_CURRENT_REQUEST_METRIC', 'False')
18 |
19 | MAX_REQUESTS_KEY_NAME = 'max_requests'
20 | CONTENT_TYPE_KEY_NAME = 'content_types'
21 | CONTENT_MAX_KEY_NAME = 'content_max_length'
22 |
23 | APP_INSIGHTS_REQUESTS_KEY_NAME = 'REJECTED_STATE'
24 |
25 | class Task(Resource):
26 | def __init__(self, **kwargs):
27 | self.task_mgr = kwargs['task_manager']
28 |
29 | def get(self, id):
30 | st = self.task_mgr.GetTaskStatus(str(id))
31 | return(st)
32 |
33 | class APIService():
34 | def __init__(self, flask_app, logger):
35 | self.app = flask_app
36 | self.log = logger
37 | self.api = Api(self.app)
38 | self.is_terminating = False
39 | self.func_properties = {}
40 | self.func_request_counts = {}
41 | self.api_prefix = getenv('API_PREFIX')
42 | # AI4EAppInsights does not expose a tracer; fall back to None in that case.
43 | self.tracer = getattr(self.log, 'tracer', None)
44 |
45 | self.api_task_manager = TaskManager()
46 | signal.signal(signal.SIGINT, self.initialize_term)
47 |
48 | # Add health check endpoint
49 | self.app.add_url_rule(self.api_prefix + '/', view_func = self.health_check, methods=['GET'])
50 | print("Adding url rule: " + self.api_prefix + '/')
51 | # Add task endpoint
52 | self.api.add_resource(Task, self.api_prefix + '/task/', resource_class_kwargs={ 'task_manager': self.api_task_manager })
53 | print("Adding url rule: " + self.api_prefix + '/task/')
54 |
55 | self.app.before_request(self.before_request)
56 |
57 | def health_check(self):
58 | print("Health check call successful.")
59 | return 'Health check OK'
60 |
61 | def api_func(self, is_async, api_path, methods, request_processing_function, maximum_concurrent_requests, content_types = None, content_max_length = None, trace_name = None, *args, **kwargs):
62 | def decorator_api_func(func):
63 | if not self.api_prefix + api_path in self.func_properties:
64 | self.func_properties[self.api_prefix + api_path] = {MAX_REQUESTS_KEY_NAME: maximum_concurrent_requests, CONTENT_TYPE_KEY_NAME: content_types, CONTENT_MAX_KEY_NAME: content_max_length}
65 | self.func_request_counts[self.api_prefix + api_path] = 0
66 |
67 | @wraps(func)
68 | def api(*args, **kwargs):
69 | internal_args = {"func": func, "api_path": api_path}
70 |
71 | if request_processing_function:
72 | return_values = request_processing_function(request)
73 | combined_kwargs = {**internal_args, **kwargs, **return_values}
74 | else:
75 | combined_kwargs = {**internal_args, **kwargs}
76 |
77 | if is_async:
78 | task_info = self.api_task_manager.AddTask(request)
79 | taskId = str(task_info['TaskId'])
80 | combined_kwargs["taskId"] = taskId
81 |
82 | self.wrap_async_endpoint(trace_name, *args, **combined_kwargs)
83 | return 'TaskId: ' + taskId
84 | else:
85 | return self.wrap_sync_endpoint(trace_name, *args, **combined_kwargs)
86 |
87 | api.__name__ = 'api_' + api_path.replace('/', '')
88 | print("Adding url rule: " + self.api_prefix + api_path + ", " + api.__name__)
89 | self.app.add_url_rule(self.api_prefix + api_path, view_func = api, methods=methods, provide_automatic_options=True)
90 | return decorator_api_func
91 |
92 | def api_async_func(self, api_path, methods, request_processing_function = None, maximum_concurrent_requests = None, content_types = None, content_max_length = None, trace_name = None, *args, **kwargs):
93 | is_async = True
94 | return self.api_func(is_async, api_path, methods, request_processing_function, maximum_concurrent_requests, content_types, content_max_length, trace_name, *args, **kwargs)
95 |
96 | def api_sync_func(self, api_path, methods, request_processing_function = None, maximum_concurrent_requests = None, content_types = None, content_max_length = None, trace_name=None, *args, **kwargs):
97 | is_async = False
98 | return self.api_func(is_async, api_path, methods, request_processing_function, maximum_concurrent_requests, content_types, content_max_length, trace_name, *args, **kwargs)
99 |
100 | def initialize_term(self, signum, frame):
101 | print('Signal handler called with signal: ' + str(signum))
102 | print('SIGINT received, service is terminating and will no longer accept requests.')
103 | self.is_terminating = True
104 |
105 | def before_request(self):
106 | # Don't accept a request if SIGTERM has been called on this instance.
107 | if (self.is_terminating):
108 | print('Process is being terminated. Request has been denied.')
109 | abort(503, {'message': 'Service is busy, please try again later.'})
110 |
111 | if request.path in self.func_properties:
112 | if (self.func_properties[request.path][CONTENT_TYPE_KEY_NAME] and not request.content_type in self.func_properties[request.path][CONTENT_TYPE_KEY_NAME]):
113 | print('Invalid content type. Request has been denied.')
114 | abort(401, {'message': 'Content-type must be ' + str(self.func_properties[request.path][CONTENT_TYPE_KEY_NAME])})
115 |
116 | if (self.func_properties[request.path][CONTENT_MAX_KEY_NAME] and request.content_length > self.func_properties[request.path][CONTENT_MAX_KEY_NAME]):
117 | print('Request is too large. Request has been denied.')
118 | abort(413, {'message': 'Request content too large (' + str(request.content_length) + "). Must be smaller than: " + str(self.func_properties[request.path][CONTENT_MAX_KEY_NAME])})
119 |
120 | denied_request=0
121 | if (self.func_request_counts[request.path] + 1 > self.func_properties[request.path][MAX_REQUESTS_KEY_NAME]):
122 | print('Current requests: ' + str(self.func_request_counts[request.path] + 1))
123 | print('Max requests: ' + str(self.func_properties[request.path][MAX_REQUESTS_KEY_NAME]))
124 | denied_request = 1
125 |
126 | print('Service is busy. Request has been denied.')
127 | abort(503, {'message': 'Service is busy, please try again later.'})
128 |
129 | if (disable_request_metric == 'False'):
130 | self.log.track_metric(APP_INSIGHTS_REQUESTS_KEY_NAME + request.path, denied_request)
131 |
132 | def increment_requests(self, api_path):
133 | self.func_request_counts[self.api_prefix + api_path] += 1
134 |
135 | def decrement_requests(self, api_path):
136 | self.func_request_counts[self.api_prefix + api_path] -= 1
137 |
138 | def wrap_sync_endpoint(self, trace_name=None, *args, **kwargs):
139 | if (self.tracer):
140 | if (not trace_name):
141 | api_path = kwargs['api_path']
142 | trace_name = api_path
143 |
144 | with self.tracer.span(name=trace_name) as span:
145 | return self._execute_func_with_counter(*args, **kwargs)
146 | else:
147 | return self._execute_func_with_counter(*args, **kwargs)
148 |
149 | def wrap_async_endpoint(self, trace_name=None, *args, **kwargs):
150 | if (self.tracer):
151 | if (not trace_name):
152 | api_path = kwargs['api_path']
153 | trace_name = api_path
154 |
155 | with self.tracer.span(name=trace_name) as span:
156 | self._create_and_execute_thread(*args, **kwargs)
157 | else:
158 | self._create_and_execute_thread(*args, **kwargs)
159 |
160 | def _create_and_execute_thread(self, *args, **kwargs):
161 | kwargs['request'] = request
162 | thread = Thread(target = self._execute_func_with_counter, args=args, kwargs=kwargs)
163 | thread.start()
164 |
165 | def _log_and_fail_exception(self, **kwargs):
166 | exc_type, exc_value, exc_traceback = sys.exc_info()
167 | ex_str = traceback.format_exception(exc_type, exc_value,exc_traceback)
168 | print(ex_str)
169 |
170 | if ('taskId' in kwargs):
171 | taskId = kwargs['taskId']
172 | if taskId:
173 | self.log.log_exception(ex_str)
174 | self.api_task_manager.FailTask(taskId, 'Task failed - please contact support or try again.')
175 | else:
176 | self.log.log_exception(ex_str)
177 | else:
178 | self.log.log_exception(ex_str)
179 |
180 | def _execute_func_with_counter(self, *args, **kwargs):
181 | func = kwargs['func']
182 | api_path = kwargs['api_path']
183 |
184 | self.increment_requests(api_path)
185 | try:
186 | r = func(*args, **kwargs)
187 | return r
188 | except Exception as e:
189 | self._log_and_fail_exception(**kwargs)
190 | abort(500)
191 | finally:
192 | self.decrement_requests(api_path)
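# Usage sketch (illustrative only): a minimal synchronous endpoint built on
# this class. The endpoint path and function body are hypothetical, and the
# API_PREFIX environment variable must be set before APIService is created.
#
#   from flask import Flask
#   from ai4e_app_insights_wrapper import AI4EAppInsights
#   from ai4e_service import APIService
#
#   app = Flask(__name__)
#   service = APIService(app, AI4EAppInsights())
#
#   # Synchronous endpoint: the caller waits for the return value.
#   @service.api_sync_func(api_path='/echo', methods=['POST'],
#                          maximum_concurrent_requests=5, trace_name='post:echo')
#   def echo(*args, **kwargs):
#       return 'OK'
#
#   if __name__ == '__main__':
#       app.run()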
--------------------------------------------------------------------------------
/Containers/base-py/ai4e_api_tools/azure_monitor_logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from os import getenv
3 | from opencensus.ext.azure.log_exporter import AzureLogHandler
4 | from opencensus.ext.azure.trace_exporter import AzureExporter
5 | from opencensus.ext.flask.flask_middleware import FlaskMiddleware
6 | from opencensus.stats import aggregation as aggregation_module
7 | from opencensus.tags import tag_map as tag_map_module
8 | from opencensus.ext.azure import metrics_exporter
9 | from opencensus.stats import measure as measure_module
10 | from opencensus.stats import view as view_module
11 | from opencensus.trace.samplers import ProbabilitySampler, AlwaysOnSampler
12 | from opencensus.trace.tracer import Tracer
13 | from opencensus.stats import stats as stats_module
14 | from opencensus.trace import config_integration
15 |
16 | CONF_SERVICE_OWNER = "SERVICE_OWNER"
17 | CONF_SERVICE_NAME = "SERVICE_NAME"
18 | CONF_SERVICE_VERSION = "SERVICE_VERSION"
19 | CONF_SERVICE_CLUSTER = "SERVICE_CLUSTER"
20 | CONF_SERVICE_MODEL_NAME = "SERVICE_MODEL_NAME"
21 | CONF_SERVICE_MODEL_FRAMEWORK = "SERVICE_MODEL_FRAMEWORK"
22 | CONF_SERVICE_MODEL_FRAMEOWRK_VERSION = "SERVICE_MODEL_FRAMEOWRK_VERSION"
23 | CONF_SERVICE_MODEL_VERSION = "SERVICE_MODEL_VERSION"
24 | CONF_SERVICE_CONTAINER_VERSION = "SERVICE_CONTAINER_VERSION"
25 | CONF_SERVICE_CONTAINER_NAME = "SERVICE_CONTAINER_NAME"
26 |
27 | class AzureMonitorLogger(object):
28 | def __init__(self, logger, flask_app=None):
29 | self._properties = {
30 | 'service_name': getenv(CONF_SERVICE_NAME),
31 | 'service_version': getenv(CONF_SERVICE_VERSION),
32 | 'service_cluster': getenv(CONF_SERVICE_CLUSTER),
33 | 'service_model_name': getenv(CONF_SERVICE_MODEL_NAME),
34 | 'service_model_version': getenv(CONF_SERVICE_MODEL_VERSION),
35 | 'service_container_version': getenv(CONF_SERVICE_CONTAINER_VERSION),
36 | 'service_container_name': getenv(CONF_SERVICE_CONTAINER_NAME),
37 | 'task_id': 'none'
38 | }
39 | self.logger = logger
40 | self.metrics = {}
41 | self.tracer = None
42 | self.appinsights_key = getenv('APPINSIGHTS_INSTRUMENTATIONKEY', None)
43 |
44 | if self.appinsights_key:
45 | try:
46 | print('Setting up Azure Monitor with Application Insights.')
47 | config_integration.trace_integrations(['logging'])
48 | #self.logger = logging.getLogger(getenv(CONF_SERVICE_NAME))
49 | self.logger.setLevel(logging.INFO)
50 | handler = AzureLogHandler(connection_string='InstrumentationKey=' + self.appinsights_key)
51 | self.logger.addHandler(handler)
52 |
53 | self.azure_exporter = AzureExporter(connection_string='InstrumentationKey=' + self.appinsights_key, timeout=float(getenv('APPINSIGHTS_TIMEOUT', 30.0)))
54 |
55 | sampling_rate = getenv('TRACE_SAMPLING_RATE', None)
56 | if not sampling_rate:
57 | sampling_rate = 1.0
58 |
59 | self.middleware = None
60 | if flask_app:
61 | self.middleware = FlaskMiddleware(
62 | flask_app,
63 | exporter=self.azure_exporter,
64 | sampler=ProbabilitySampler(rate=float(sampling_rate)),
65 | )
66 |
67 | #self.tracer = Tracer(
68 | # exporter=self.azure_exporter,
69 | # sampler=ProbabilitySampler(rate=float(sampling_rate)),
70 | #)
71 | self.tracer = Tracer(exporter=self.azure_exporter, sampler=AlwaysOnSampler())
72 |
73 | self.metrics_exporter = metrics_exporter.new_metrics_exporter(connection_string='InstrumentationKey=' + self.appinsights_key)
74 | stats = stats_module.stats
75 | self.view_manager = stats.view_manager
76 | self.view_manager.register_exporter(self.metrics_exporter)
77 | self.stats_recorder = stats.stats_recorder
78 | except Exception as e:
79 | print('Exception in setting up the Azure Monitor:')
80 | print(e)
81 |
82 | def log_debug(self, message, taskId = None, additionalProperties = None):
83 | properties = dict(self._properties)  # copy so per-call fields do not leak between log calls
84 | properties['task_id'] = taskId
85 | if additionalProperties:
86 | properties.update(additionalProperties)
87 |
88 | custom_dimensions = {'custom_dimensions': properties}
89 | self.logger.debug(message, extra=custom_dimensions)
90 |
91 | def log_info(self, message, taskId = None, additionalProperties = None):
92 | properties = dict(self._properties)
93 | properties['task_id'] = taskId
94 | if additionalProperties:
95 | properties.update(additionalProperties)
96 |
97 | custom_dimensions = {'custom_dimensions': properties}
98 |
99 | self.logger.info(message, extra=custom_dimensions)
100 |
101 | def log_warn(self, message, taskId = None, additionalProperties = None):
102 | properties = dict(self._properties)
103 | properties['task_id'] = taskId
104 | if additionalProperties:
105 | properties.update(additionalProperties)
106 |
107 | custom_dimensions = {'custom_dimensions': properties}
108 | self.logger.warning(message, extra=custom_dimensions)
109 |
110 | def log_error(self, message, taskId = None, additionalProperties = None):
111 | properties = dict(self._properties)
112 | properties['task_id'] = taskId
113 | if additionalProperties:
114 | properties.update(additionalProperties)
115 |
116 | custom_dimensions = {'custom_dimensions': properties}
117 | self.logger.error(message, extra=custom_dimensions)
118 |
119 | def log_exception(self, message, taskId = None, additionalProperties = None):
120 | properties = dict(self._properties)
121 | properties['task_id'] = taskId
122 | if additionalProperties:
123 | properties.update(additionalProperties)
124 |
125 | custom_dimensions = {'custom_dimensions': properties}
126 | self.logger.exception(message, extra=custom_dimensions)
127 |
128 | def track_metric(self, metric_name, metric_value):
129 | try:
130 | if (self.appinsights_key):
131 | if (not metric_name in self.metrics):
132 | metrics_measure = measure_module.MeasureInt(metric_name, metric_name, metric_name)
133 | metrics_view = view_module.View(metric_name, metric_name, [], metrics_measure, aggregation_module.LastValueAggregation(value=metric_value))
134 |
135 | self.view_manager.register_view(metrics_view)
136 | mmap = self.stats_recorder.new_measurement_map()
137 | tmap = tag_map_module.TagMap()
138 |
139 | self.metrics[metric_name] = {'measure': metrics_measure, 'measurement_map': mmap, 'tag_map': tmap}
140 |
141 | measure = self.metrics[metric_name]['measure']
142 | mmap = self.metrics[metric_name]['measurement_map']
143 | tmap = self.metrics[metric_name]['tag_map']
144 | mmap.measure_int_put(measure, metric_value)
145 | mmap.record(tmap)
146 | except Exception as e:
147 | print('Exception when tracking a metric:')
148 | print(e)
149 |
--------------------------------------------------------------------------------
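
A minimal usage sketch for AzureMonitorLogger, assuming the module is importable from /ai4e_api_tools (as the base-py image arranges) and that APPINSIGHTS_INSTRUMENTATIONKEY may be unset, in which case messages simply go to the plain Python logger:

    import logging
    from flask import Flask
    from azure_monitor_logger import AzureMonitorLogger

    app = Flask(__name__)
    monitor = AzureMonitorLogger(logging.getLogger('my_service'), flask_app=app)

    # Structured log entry; the taskId lands in custom_dimensions.
    monitor.log_info('Model loaded', taskId='1234')

    # Integer metric, exported through OpenCensus when Application Insights is configured.
    monitor.track_metric('requests_inflight', 3)
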
/Containers/base-py/ai4e_api_tools/task_management/api_task.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | from datetime import datetime
4 | import json
5 | import os
6 | import multiprocessing
7 | from typing import Any, Dict
8 | import uuid
9 |
10 | import requests
11 |
12 |
13 | print("Creating task manager.")
14 |
15 | LOCAL_BLOB_TEST_DIRECTORY = os.getenv('LOCAL_BLOB_TEST_DIRECTORY', '.')
16 |
17 | class TaskManager:
18 |
19 | # use a lock whenever we access task_status.json
20 | task_status_json_lock = multiprocessing.Lock()
21 |
22 | def __init__(self):
23 | self.status_dict = {}
24 | self.task_status_json_path = os.path.join(
25 | LOCAL_BLOB_TEST_DIRECTORY, 'task_status.json')
26 |
27 | def GetTaskId(self) -> str:
28 | return str(uuid.uuid4())
29 |
30 | def AddTask(self, request):
31 | status = {
32 | 'TaskId': self.GetTaskId(),
33 | 'Status': 'created',
34 | 'Timestamp': datetime.strftime(
35 | datetime.utcnow(), "%Y-%m-%d %H:%M:%S"),
36 | 'Endpoint': request.path
37 | }
38 |
39 | with self.task_status_json_lock:
40 | statuses = []
41 | if os.path.isfile(self.task_status_json_path):
42 | with open(self.task_status_json_path, 'r') as f:
43 | statuses = json.load(f)
44 | statuses.append(status)
45 |
46 | with open(self.task_status_json_path, 'w') as f:
47 | json.dump(statuses, f)
48 | return status
49 |
50 | def UpdateTaskStatus(self, taskId: str, status: Any) -> None:
51 | with self.task_status_json_lock:
52 | if not os.path.isfile(self.task_status_json_path):
53 | raise ValueError('taskId "{}" is not found. Decorate your endpoint with an ai4e_service decorator or call AddTask(request) before UpdateTaskStatus.'.format(taskId))
54 |
55 | statuses = []
56 | with open(self.task_status_json_path, 'r') as f:
57 | statuses = json.load(f)
58 |
59 | task_status = None
60 | for rec_status in statuses:
61 | if rec_status['TaskId'] == taskId:
62 | task_status = rec_status
63 | break
64 |
65 | if task_status is None:
66 | raise ValueError('taskId "{}" is not found. Decorate your endpoint with an ai4e_service decorator or call AddTask(request) before UpdateTaskStatus.'.format(taskId))
67 | else:
68 | task_status['Status'] = status
69 | task_status['Timestamp'] = datetime.strftime(
70 | datetime.utcnow(), "%Y-%m-%d %H:%M:%S")
71 |
72 | with open(self.task_status_json_path, 'w') as f:
73 | json.dump(statuses, f)
74 |
75 | def AddPipelineTask(self, taskId, organization_moniker, version, api_name, body):
76 | next_url = version + '/' + organization_moniker + '/' + api_name
77 |
78 | host = os.getenv('LOCAL_NEXT_API_HOST_IN_PIPELINE', '')
79 |
80 | if len(host) > 0:
81 | next_url = str(host) + '/' + str(next_url)
82 |
83 | r = requests.post(next_url, data=body)
84 |
85 | if r.status_code != 200:
86 | self.UpdateTaskStatus(taskId, "Pipelining is not supported in a single node deployment, but the next service is: " + next_url)
87 | return "Pipelining is not supported in a single node deployment, but the next service is: " + next_url
88 | else:
89 | return r.status_code
90 |
91 | def CompleteTask(self, taskId, status):
92 | self.UpdateTaskStatus(taskId, status)
93 |
94 | def FailTask(self, taskId, status):
95 | self.UpdateTaskStatus(taskId, status)
96 |
97 | def GetTaskStatus(self, taskId: str) -> Dict[str, Any]:
98 | with self.task_status_json_lock:
99 | if os.path.isfile(self.task_status_json_path):
100 | with open(self.task_status_json_path, 'r') as f:
101 | statuses = json.load(f)
102 |
103 | for rec_status in statuses:
104 | if rec_status['TaskId'] == taskId:
105 | return rec_status
106 |
107 | status = {
108 | 'TaskId': taskId,
109 | 'Status': 'Not found.',
110 | 'Timestamp': datetime.strftime(datetime.utcnow(), "%Y-%m-%d %H:%M:%S"),
111 | 'Endpoint': ''
112 | }
113 | return status
114 |
--------------------------------------------------------------------------------
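
TaskManager persists task state to task_status.json under LOCAL_BLOB_TEST_DIRECTORY. A sketch of the task lifecycle, assuming it runs inside a Flask request handler (AddTask only reads request.path) and that the module is imported from /ai4e_api_tools:

    from flask import request
    from task_management.api_task import TaskManager

    task_manager = TaskManager()

    def start_job():
        status = task_manager.AddTask(request)       # appends a 'created' record to task_status.json
        task_id = status['TaskId']
        task_manager.UpdateTaskStatus(task_id, 'running')
        # ... run the model ...
        task_manager.CompleteTask(task_id, 'completed')
        return task_manager.GetTaskStatus(task_id)   # {'TaskId': ..., 'Status': 'completed', ...}
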
/Containers/base-py/requirements.txt:
--------------------------------------------------------------------------------
1 | # Install Azure Blob SDK
2 | azure-identity==1.3.1
3 | azure-mgmt-storage==9.0.0
4 | azure-storage-blob==12.3.1
5 |
6 | # Install hosting requirements
7 | Flask==1.1.2
8 | Flask-RESTful==0.3.8
9 | gunicorn==20.0.4
10 |
11 | # Install Application Insights Opencensus packages
12 | #applicationinsights==0.11.9
13 | opencensus-ext-logging==0.1.0
14 | opencensus-ext-azure==1.0.4
15 | opencensus-ext-flask==0.7.3
--------------------------------------------------------------------------------
/Containers/base-r/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | #mcr.microsoft.com/aiforearth/base-r:version
4 | FROM osgeo/gdal:ubuntu-full-3.0.3
5 | ARG DEBIAN_FRONTEND=noninteractive
6 |
7 | RUN apt-get update --fix-missing && \
8 | apt-get install -y wget supervisor bzip2 && \
9 | apt-get clean && \
10 | rm -rf /var/lib/apt/lists/*
11 |
12 | RUN apt-get update \
13 | && apt-get install -y --no-install-recommends \
14 | r-base \
15 | r-base-dev \
16 | r-recommended
17 |
18 | RUN apt-get update \
19 | && apt-get install -y \
20 | apt-transport-https \
21 | build-essential \
22 | ed \
23 | gtk2.0 \
24 | libcurl4-openssl-dev \
25 | libgtk2.0-dev \
26 | libiodbc2-dev \
27 | libnlopt-dev \
28 | libssh2-1-dev \
29 | libssl-dev \
30 | libxml2-dev \
31 | software-properties-common \
32 | wget \
33 | xvfb \
34 | && rm -rf /var/lib/apt/lists/*
35 |
36 | RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9
37 | RUN add-apt-repository 'deb [arch=amd64,i386] https://cran.rstudio.com/bin/linux/ubuntu bionic-cran35/'
38 |
39 | RUN apt-get update \
40 | && apt-get install -y --no-install-recommends \
41 | r-base \
42 | r-base-dev \
43 | r-recommended
44 |
45 | RUN echo "r <- getOption('repos'); r['CRAN'] <- 'http://cran.r-project.org'; options(repos = r);" > ~/.Rprofile
46 | RUN Rscript -e 'install.packages(c("curl", "httr"));'
47 | RUN Rscript -e 'Sys.setenv(CURL_CA_BUNDLE="/utils/microsoft-r-open-3.4.3/lib64/R/lib/microsoft-r-cacert.pem");'
48 | RUN Rscript -e 'install.packages("sp");'
49 | RUN Rscript -e 'install.packages("rgdal");'
50 | RUN Rscript -e 'install.packages("plumber");'
51 | RUN Rscript -e 'install.packages("R.utils");'
52 | RUN Rscript -e 'install.packages("future");'
53 | RUN Rscript -e 'install.packages("devtools");'
54 | RUN Rscript -e 'install.packages("RCurl");'
55 | RUN Rscript -e 'install.packages("sjmisc");'
56 | RUN Rscript -e 'install.packages("reticulate");'
57 | RUN Rscript -e 'install.packages("urltools");'
58 | RUN Rscript -e 'install.packages("ids");'
59 |
60 | RUN add-apt-repository ppa:deadsnakes/ppa \
61 | && apt-get update \
62 | && apt-get install -y python3-setuptools \
63 | python3.7-dev \
64 | python3.7
65 |
66 | RUN apt-get update && apt-get install python3-pip -y
67 | RUN python3.7 -m pip install pip --upgrade
68 |
69 | COPY ./base-py/requirements.txt /ai4e_api_tools/
70 | WORKDIR /ai4e_api_tools
71 | RUN pip3 install -r ./requirements.txt
72 |
73 | COPY ./base-r/ai4e_api_tools /ai4e_api_tools/
74 | COPY ./common/sas_blob.py /ai4e_api_tools/
75 | COPY ./common/aad_blob.py /ai4e_api_tools/
76 |
77 | ENV PYTHONPATH="${PYTHONPATH}:/ai4e_api_tools"
--------------------------------------------------------------------------------
/Containers/base-r/ai4e_api_tools/ai4e_app_insights.R:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | # Primary Application Insights R library.
4 |
5 | library(httr)
6 | library(jsonlite)
7 | library(sjmisc)
8 |
9 | APP_SVC_URL <- "https://dc.services.visualstudio.com/v2/track"
10 |
11 | APPINSIGHTS_INSTRUMENTATIONKEY <- Sys.getenv("APPINSIGHTS_INSTRUMENTATIONKEY")
12 | CONF_SERVICE_OWNER <- Sys.getenv("SERVICE_OWNER", unset="")
13 | CONF_SERVICE_CLUSTER <- Sys.getenv("SERVICE_CLUSTER", unset="")
14 | CONF_SERVICE_MODEL_NAME <- Sys.getenv("SERVICE_MODEL_NAME", unset="")
15 | CONF_SERVICE_MODEL_FRAMEWORK <- Sys.getenv("SERVICE_MODEL_FRAMEWORK", unset="")
16 | CONF_SERVICE_MODEL_FRAMEOWRK_VERSION <- Sys.getenv("SERVICE_MODEL_FRAMEOWRK_VERSION", unset="")
17 | CONF_SERVICE_MODEL_VERSION <- Sys.getenv("SERVICE_MODEL_VERSION", unset="")
18 | CONF_SERVICE_NAME <- Sys.getenv("SERVICE_NAME", unset="")
19 | CONF_SERVICE_VERSION <- Sys.getenv("SERVICE_VERSION", unset="")
20 | CONF_SERVICE_CONTAINER_VERSION <- Sys.getenv("SERVICE_CONTAINER_VERSION", unset="")
21 | CONF_SERVICE_CONTAINER_NAME <- Sys.getenv("SERVICE_CONTAINER_NAME", unset="")
22 | IS_DEBUG <- Sys.getenv("DEBUG", unset=FALSE)
23 |
24 | if (nchar(trim(APPINSIGHTS_INSTRUMENTATIONKEY)) == 0) {
25 | APPINSIGHTS_INSTRUMENTATIONKEY = NULL
26 | }
27 |
28 | log <- function(message, sev, taskId, additionalProperties){
29 | if (!is.null(APPINSIGHTS_INSTRUMENTATIONKEY)) {
30 | payload <- get_payload(message, sev, APPINSIGHTS_INSTRUMENTATIONKEY, taskId, additionalProperties)
31 | if (IS_DEBUG) {
32 | print(payload)
33 | }
34 | r = POST(APP_SVC_URL, body=payload)
35 |
36 | if (status_code(r) != 200){
37 | print(http_status(r))
38 | }
39 | }
40 | }
41 |
42 | log_debug <- function(message, taskId, additionalProperties){
43 | log(message, "Verbose", taskId, additionalProperties)
44 | }
45 |
46 | log_info <- function(message, taskId, additionalProperties){
47 | log(message, "Information", taskId, additionalProperties)
48 | }
49 |
50 | log_warn <- function(message, taskId, additionalProperties){
51 | log(message, "Warning", taskId, additionalProperties)
52 | }
53 |
54 | log_error <- function(message, taskId, additionalProperties){
55 | log(message, "Error", taskId, additionalProperties)
56 | }
57 |
58 | log_exception <- function(message, taskId, additionalProperties){
59 | log(message, "Critical", taskId, additionalProperties)
60 | }
61 |
62 | get_payload <- function(msg, sev, key, taskId, additionalProperties){
63 | messaged <- msg
64 | severityLeveld <- sev
65 | propertiesd <- get_properties(taskId)
66 |
67 | if(!missing(additionalProperties)) {
68 | propertiesd <- c(propertiesd, additionalProperties)
69 | }
70 |
71 | baseDatad <- list(message = messaged, severityLevel = severityLeveld, properties = propertiesd)
72 |
73 | baseTyped <- "MessageData"
74 |
75 | named <- "MessageData"
76 | timed <- as.POSIXlt(Sys.time(), "UTC", "%Y-%m-%dT%H:%M:%SZ")
77 | iKeyd <- key
78 | datad <- list(baseType = baseTyped, baseData = baseDatad)
79 | payload <- list(name = named, time = timed, iKey = iKeyd, data = datad)
80 |
81 | json_payload <- toJSON(payload, pretty = TRUE, auto_unbox = TRUE)
82 | return(json_payload)
83 | }
84 |
85 | get_properties <- function(taskId){
86 | props <- list()
87 | props[[ "task_id" ]] <- taskId
88 | props[[ "service_owner" ]] <- CONF_SERVICE_OWNER
89 | props[[ "service_cluster" ]] <- CONF_SERVICE_CLUSTER
90 | props[[ "service_model_name" ]] <- CONF_SERVICE_MODEL_NAME
91 | props[[ "service_model_framework" ]] <- CONF_SERVICE_MODEL_FRAMEWORK
92 | props[[ "service_model_framework_version" ]] <- CONF_SERVICE_MODEL_FRAMEOWRK_VERSION
93 | props[[ "service_model_version" ]] <- CONF_SERVICE_MODEL_VERSION
94 | props[[ "service_name" ]] <- CONF_SERVICE_NAME
95 | props[[ "service_version" ]] <- CONF_SERVICE_VERSION
96 | props[[ "service_container_version" ]] <- CONF_SERVICE_CONTAINER_VERSION
97 | props[[ "service_container_name" ]] <- CONF_SERVICE_CONTAINER_NAME
98 |
99 | return(props)
100 | }
--------------------------------------------------------------------------------
/Containers/base-r/ai4e_api_tools/task_management/api_task.R:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | library(httr)
4 | library(ids)
5 |
6 | AddTask<-function(req){
7 | taskId = uuid()
8 |
9 | newTask <- data.frame(
10 | TaskId = taskId,
11 | Status = c("queued"),
12 | Timestamp = c(format(Sys.time(), "%b %d %X %Y")),
13 | Endpoint = "uri"
14 | )
15 |
16 | if (file.exists("tasks.csv")) {
17 | print("file exists")
18 | tasks<-read.csv("tasks.csv", stringsAsFactors=FALSE)
19 | write.csv(rbind(tasks, newTask), "tasks.csv", row.names=FALSE)
20 | }
21 | else {
22 | write.csv(newTask, "tasks.csv", row.names=FALSE)
23 | }
24 |
25 | return(newTask)
26 | }
27 |
28 | UpdateTaskStatus<-function(taskId, status){
29 | taskId = as.numeric(taskId)
30 | print(paste0("updating ", taskId, " to ", status))
31 |
32 | if (file.exists("tasks.csv")) {
33 | tasks<-read.csv("tasks.csv", stringsAsFactors=FALSE)
34 |
35 | tasks[taskId, 2] <- c(status)
36 | tasks[taskId, 3] <- c(format(Sys.time(), "%b %d %X %Y"))
37 |
38 | write.csv(tasks, "tasks.csv", row.names=FALSE)
39 | }
40 | else {
41 | newTask <- data.frame(
42 | TaskId = taskId,
43 | Status = c(status),
44 | Timestamp = c(format(Sys.time(), "%b %d %X %Y")),
45 | Endpoint = "uri"
46 | )
47 |
48 | write.csv(newTask, "tasks.csv", row.names=FALSE)
49 | }
50 | }
51 |
52 | CompleteTask<-function(taskId, status){
53 | UpdateTaskStatus(taskId, status)
54 | }
55 |
56 | FailTask<-function(taskId, status){
57 | UpdateTaskStatus(taskId, status)
58 | }
59 |
60 | AddPipelineTask<-function(taskId, organization_moniker, version, api_name, body) {
61 | next_url <- paste(version, organization_moniker, api_name, sep = "/")
62 |
63 | host <- Sys.getenv("LOCAL_NEXT_API_HOST_IN_PIPELINE")
64 |
65 | if (nzchar(host)) {
66 | next_url <- paste(host, next_url, sep = "/")
67 | }
68 |
69 | tryCatch({
70 | res <- POST(next_url, body=body)
71 |
72 | if (status_code(res) != 200) {
73 | UpdateTaskStatus(taskId, paste0("Pipelining is not supported in a single node deployment, but the next service is: ", next_url))
74 | return(paste0("Pipelining is not supported in a single node deployment, but the next service is: ", next_url))
75 | }
76 | }, error = function(err) {
77 | print(err)
78 | UpdateTaskStatus(taskId, paste0("Pipelining is not supported in a single node deployment, but the next service is: ", next_url))
79 | return(paste0("Pipelining is not supported in a single node deployment, but the next service is: ", next_url))
80 | })
81 |
82 | return(res)
83 | }
84 |
85 | #* Get status of task by id
86 | #* @param taskId The id of the task
87 | #* @get /task/
88 | GetTaskStatus<-function(taskId){
89 | if (!file.exists("tasks.csv")) {
90 | return("Task not found.")
91 | }
92 |
93 | taskId = as.numeric(taskId)
94 | tasks<-read.csv("tasks.csv")
95 | tasks[taskId, 1:3]
96 | }
97 |
98 | # Keep an empty last line at the end of this file; otherwise, you will see an error when starting the web server.
99 |
--------------------------------------------------------------------------------
/Containers/base-r/requirements.txt:
--------------------------------------------------------------------------------
1 | setuptools==65.5.1
2 |
3 | # Install Azure Blob SDK
4 | azure-identity==1.3.1
5 | azure-mgmt-storage==9.0.0
6 | azure-storage-blob==12.3.1
7 |
--------------------------------------------------------------------------------
/Containers/blob-py/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/aiforearth/base-py:1.13
2 |
3 | #############################################################################################
4 | # Add support for mounting an Azure blob
5 | # Add the Microsoft package repository and install blobfuse
6 | RUN apt-get update && apt-get install -y wget
7 | RUN wget https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb && \
8 | dpkg -i packages-microsoft-prod.deb && \
9 | apt-get update && \
10 | apt-get install blobfuse -y
11 |
12 | COPY ./common/blob_mounting/blob_mounter.py /app/fuse/blob_mounter.py
13 | RUN chmod +x /app/fuse/blob_mounter.py
14 | #############################################################################################
--------------------------------------------------------------------------------
/Containers/blob-py/blob_mount.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "accountName":"enter_your_accountname",
4 | "accountKey":"enter_your_accountkey",
5 | "containerName":"enter_your_container_name",
6 | "mappedDirectory":"/mnt/enter_your_local_directory"
7 | }
8 | ]
--------------------------------------------------------------------------------
/Containers/blob-py/readme.txt:
--------------------------------------------------------------------------------
1 | docker run -p 8001:80 --cap-add SYS_ADMIN --device /dev/fuse "ai4eregistry.azurecr.io/1.0-blob-py-ubuntu16.04:latest"
--------------------------------------------------------------------------------
/Containers/blob-py/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python /app/fuse/blob_mounter.py
3 | /usr/bin/supervisord
--------------------------------------------------------------------------------
/Containers/blob-r/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/aiforearth/base-r:1.6
2 |
3 | #############################################################################################
4 | # Add support for mounting an Azure blob
5 | # Add the Microsoft package repository and install blobfuse
6 | RUN wget https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb && \
7 | dpkg -i packages-microsoft-prod.deb && \
8 | apt-get update && \
9 | apt-get install blobfuse -y
10 |
11 | COPY ./common/blob_mounting/blob_mounter.py /app/fuse/blob_mounter.py
12 | RUN chmod +x /app/fuse/blob_mounter.py
13 | #############################################################################################
--------------------------------------------------------------------------------
/Containers/blob-r/blob_mount.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "accountName":"enter_your_accountname",
4 | "accountKey":"enter_your_accountkey",
5 | "containerName":"enter_your_container_name",
6 | "mappedDirectory":"/mnt/enter_your_local_directory"
7 | }
8 | ]
--------------------------------------------------------------------------------
/Containers/blob-r/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python /app/fuse/blob_mounter.py
3 | /usr/bin/supervisord
--------------------------------------------------------------------------------
/Containers/common/aad_blob.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | from azure.identity import ClientSecretCredential, ManagedIdentityCredential
4 | from azure.storage.blob import BlobServiceClient, BlobClient
5 |
6 | from datetime import datetime, timezone
7 | from email.utils import formatdate
8 |
9 | import io
10 | import os
11 | from shutil import copyfile
12 | from pathlib import Path
13 | import requests
14 |
15 | CLIENT_SECRET_CRED_TYPE = "client_secret"
16 | MANAGED_IDENTITY_CRED_TYPE = "managed_identity"
17 | LOCAL_CRED_TYPE = "local"
18 |
19 | # There are 3 ways to use this class:
20 | # 1. ClientSecretCredential - provide aad_tenant_id, aad_application_id, aad_application_secret
21 | # 2. ManagedIdentityCredential - provide aad_application_id. Application must be a managed identity: https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
22 | # 3. Local - provide local_test_directory. Circumvents AAD for testing purposes only.
23 | class AadBlob:
24 | def __init__(self, aad_tenant_id=None, aad_application_id=None, aad_application_secret=None, aad_account_name=None, local_test_directory=None):
25 | self.aad_tenant_id = aad_tenant_id
26 | self.aad_application_id = aad_application_id
27 | self.aad_application_secret = aad_application_secret
28 | self.aad_account_name = aad_account_name
29 | self.local_test_directory=local_test_directory
30 |
31 | self.account_url="https://{}.blob.core.windows.net".format(self.aad_account_name)
32 |
33 | self.credential_type = LOCAL_CRED_TYPE
34 | if self.local_test_directory is None:
35 | if self.aad_application_secret is None:
36 | self.credential_type = MANAGED_IDENTITY_CRED_TYPE
37 | else:
38 | self.credential_type = CLIENT_SECRET_CRED_TYPE
39 |
40 | def _get_managed_identity_credential(self):
41 | cred = ManagedIdentityCredential(client_id=self.aad_application_id)
42 | return cred.get_token('https://storage.azure.com/.default').token
43 |
44 | def _get_blob_service_client(self):
45 | token_credential = ClientSecretCredential(
46 | self.aad_tenant_id,
47 | self.aad_application_id,
48 | self.aad_application_secret
49 | )
50 |
51 | blob_service_client = BlobServiceClient(
52 | account_url="https://{}.blob.core.windows.net".format(self.aad_account_name),
53 | credential=token_credential
54 | )
55 |
56 | return blob_service_client
57 |
58 |
59 |
60 |
61 | # Write Blobs............................
62 | def _upload_managed_identity_blob(self, container, blob, data):
63 | token_credential = self._get_managed_identity_credential()
64 |
65 | blob_uri = self.get_blob_uri(container, blob)
66 |
67 | headers = { 'Authorization': 'Bearer ' + token_credential,
68 | 'x-ms-version': '2019-07-07',
69 | 'x-ms-date': formatdate(timeval=None, localtime=False, usegmt=True),
70 | 'x-ms-blob-type': 'BlockBlob',
71 | 'Content-Length': str(len(data))}
72 |
73 | r = requests.put(blob_uri, headers=headers, data=data)
74 | r.raise_for_status()
75 | return r
76 |
77 | def _write_blob(self, container, blob, data):
78 | if self.credential_type is MANAGED_IDENTITY_CRED_TYPE:
79 | get_response = self._upload_managed_identity_blob(container, blob, data)
80 | return get_response
81 |
82 | else: # CLIENT_SECRET_CRED_TYPE:
83 | blob_service_client = self._get_blob_service_client()
84 | blob_client = blob_service_client.get_blob_client(container, blob)
85 | blob_client.upload_blob(data, overwrite=True)
86 |
87 | def write_blob_from_text(self, container, blob, text):
88 | print("{} for {}/{}".format('write_blob_from_text', container, blob))
89 |
90 | if not self.credential_type is LOCAL_CRED_TYPE:
91 | return self._write_blob(container, blob, text)
92 |
93 | else: # LOCAL_CRED_TYPE
94 | abosolute_file_name = os.path.join(self.local_test_directory, container, blob)
95 | if not os.path.exists(os.path.dirname(abosolute_file_name)):
96 | os.makedirs(os.path.dirname(abosolute_file_name))
97 |
98 | f = open(abosolute_file_name, 'w')
99 | f.write(text)
100 | f.close()
101 | return abosolute_file_name
102 |
103 | def create_blob_from_path(self, container, blob, path):
104 | print("{} for {}/{}".format('create_blob_from_path', container, blob))
105 |
106 | if not self.credential_type is LOCAL_CRED_TYPE:
107 | with open(path, "rb") as handle:
108 | data = handle.read()
109 | return self._write_blob(container, blob, data)
110 |
111 | else: # LOCAL_CRED_TYPE
112 | abosolute_file_name = os.path.join(self.local_test_directory, container, blob)
113 | if not os.path.exists(os.path.dirname(abosolute_file_name)):
114 | os.makedirs(os.path.dirname(abosolute_file_name))
115 |
116 | f = open(abosolute_file_name, 'wb')
117 | f.write(Path(path).read_bytes())  # write the file contents, not the blob name
118 | f.close()
119 | return abosolute_file_name
120 |
121 | # Get Blobs............................
122 | def _download_managed_identity_blob(self, container, blob):
123 | token_credential = self._get_managed_identity_credential()
124 |
125 | blob_uri = self.get_blob_uri(container, blob)
126 | headers = { 'Authorization': "Bearer " + token_credential,
127 | 'x-ms-version': '2019-07-07',
128 | 'x-ms-date': formatdate(timeval=None, localtime=False, usegmt=True)}
129 |
130 | return requests.get(blob_uri, headers=headers)
131 |
132 | def _get_blob(self, container, blob, encoding = None):
133 | file_type = ('w' if encoding else 'wb')
134 |
135 | if self.credential_type is MANAGED_IDENTITY_CRED_TYPE:
136 | get_response = self._download_managed_identity_blob(container, blob)
137 | data = (get_response.text if encoding else get_response.content)
138 | return data
139 |
140 | else: # CLIENT_SECRET_CRED_TYPE:
141 | blob_service_client = self._get_blob_service_client()
142 | blob_client = blob_service_client.get_blob_client(container, blob)
143 | download_stream = blob_client.download_blob(encoding=encoding)
144 | return download_stream
145 |
146 | def save_blob_locally(self, container, blob, local_file, encoding = None):
147 | file_type = ('w' if encoding else 'wb')
148 | if self.credential_type is MANAGED_IDENTITY_CRED_TYPE:
149 | with open(local_file, file_type) as open_file:
150 | data = self._get_blob(container, blob, encoding)
151 | open_file.write(data)
152 |
153 | elif self.credential_type is CLIENT_SECRET_CRED_TYPE:
154 | with open(local_file, file_type) as open_file:
155 | data_stream = self._get_blob(container, blob, encoding)
156 | open_file.write(data_stream.readall())
157 |
158 | else: # LOCAL_CRED_TYPE
159 | abosolute_file_name = os.path.join(self.local_test_directory, container, blob)
160 | copyfile(abosolute_file_name, local_file)
161 |
162 | # Saves a binary blob locally
163 | def save_local_blob(self, container, blob, local_file):
164 | print("{} for {}/{}".format('save_local_blob', container, blob))
165 | return self.save_blob_locally(container, blob, local_file)
166 |
167 | # Saves a UTF-8 blob locally
168 | def save_local_text(self, container, blob, local_file):
169 | print("{} for {}/{}".format('save_local_text', container, blob))
170 | return self.save_blob_locally(container, blob, local_file, encoding='UTF-8')
171 |
172 | # Gets a blob to a stream
173 | def get_blob(self, container, blob):
174 | print("{} for {}/{}".format('get_blob', container, blob))
175 |
176 | if self.credential_type is CLIENT_SECRET_CRED_TYPE:
177 | blob_service_client = self._get_blob_service_client()
178 | blob_client = blob_service_client.get_blob_client(container, blob)
179 |
180 | with io.BytesIO() as output_stream:
181 | download_stream = blob_client.download_blob()
182 | download_stream.readinto(output_stream)
183 | return output_stream
184 | else:
185 | abosolute_file_name = os.path.join('.', container, blob)
186 |
187 | if self.credential_type is MANAGED_IDENTITY_CRED_TYPE:
188 | self.save_blob_locally(container, blob, abosolute_file_name)
189 | else:
190 | abosolute_file_name = os.path.join(self.local_test_directory, container, blob)
191 |
192 | return open(abosolute_file_name)
193 |
194 | def get_blob_to_bytes(self, container, blob):
195 | print("{} for {}/{}".format('get_blob_to_bytes', container, blob))
196 |
197 | if self.credential_type is MANAGED_IDENTITY_CRED_TYPE:
198 | data = self._get_blob(container, blob)
199 | return data
200 | elif self.credential_type is CLIENT_SECRET_CRED_TYPE:
201 | data = self._get_blob(container, blob)
202 | return data.content_as_bytes()
203 | else: # LOCAL_CRED_TYPE
204 | abosolute_file_name = os.path.join(self.local_test_directory, container, blob)
205 | f = open(abosolute_file_name, 'rb')
206 | bi = f.read()
207 | f.close()
208 | return bi
209 |
210 | def get_blob_to_text(self, container, blob):
211 | print("{} for {}/{}".format('get_blob_to_text', container, blob))
212 |
213 | if self.credential_type is MANAGED_IDENTITY_CRED_TYPE:
214 | data = self._get_blob(container, blob, encoding='UTF-8')
215 | return data
216 | elif self.credential_type is CLIENT_SECRET_CRED_TYPE:
217 | data = self._get_blob(container, blob, encoding='UTF-8')
218 | return data.readall()
219 | else: # LOCAL_CRED_TYPE
220 | abosolute_file_name = os.path.join(self.local_test_directory, container, blob)
221 | print(abosolute_file_name)
222 | f = open(abosolute_file_name)
223 | txt = f.read()
224 | f.close()
225 | return txt
226 |
227 |
228 | # Helpers............................
229 | def does_blob_exist(self, container, blob):
230 | if self.credential_type is MANAGED_IDENTITY_CRED_TYPE:
231 | token_credential = self._get_managed_identity_credential()
232 |
233 | container_uri = "{}/{}?restype=container&comp=list&prefix={}".format(self.account_url, container, blob)
234 |
235 | headers = { 'Authorization': "Bearer " + token_credential,
236 | 'x-ms-version': '2019-07-07',
237 | 'x-ms-date': formatdate(timeval=None, localtime=False, usegmt=True)}
238 |
239 | blobs = requests.get(container_uri, headers=headers).json()
240 | if (len(blobs['Blobs']) > 0):
241 | return True
242 | else:
243 | return False
244 |
245 | elif self.credential_type is CLIENT_SECRET_CRED_TYPE:
246 | service = self._get_blob_service_client()
247 | cc = service.get_container_client(container)
248 | blob_list = cc.list_blobs(name_starts_with=blob)
249 | for b in blob_list:
250 | if b.name == blob:
251 | return True
252 |
253 | return False
254 |
255 | else: # LOCAL_CRED_TYPE
256 | abosolute_file_name = os.path.join(self.local_test_directory, container, blob)
257 | blobfile = Path(abosolute_file_name)
258 | return blobfile.exists()
259 |
260 | def get_blob_uri(self, container, blob):
261 | if self.local_test_directory is None:
262 | return "{}/{}/{}".format(self.account_url, container, blob)
263 | else:
264 | return os.path.join(self.local_test_directory, container, blob)
265 |
266 |
--------------------------------------------------------------------------------
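
A sketch of the local test mode described at the top of AadBlob, which bypasses AAD and reads and writes files under a test directory; the other two modes use the same calls but need tenant/application credentials or a managed identity. The directory and blob names below are illustrative:

    from aad_blob import AadBlob

    blob = AadBlob(local_test_directory='/tmp/aad_blob_test')
    blob.write_blob_from_text('my-container', 'results/output.txt', 'hello')
    print(blob.does_blob_exist('my-container', 'results/output.txt'))   # True
    print(blob.get_blob_to_text('my-container', 'results/output.txt'))  # hello
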
/Containers/common/blob_mounting/blob_mounter.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | import json
4 | import subprocess
5 | import os, shutil
6 | import sys
7 |
8 | def execute_bash(bash_command):
9 | print("Executing command: " + str(bash_command))
10 | process = subprocess.Popen(bash_command, shell=True)
11 | output, error = process.communicate()
12 | print("output: " + str(output))
13 | print("error: " + str(error))
14 |
15 | with open('/app/fuse/blob_mount.json') as f:
16 | try:
17 | blob_config = json.load(f)
18 |
19 | if not os.path.exists("/var/fuze_connections"):
20 | os.mkdir("/var/fuze_connections")
21 |
22 | i = 0
23 | for cfg in blob_config:
24 | i = i + 1
25 | fuse_cfg_file = "/var/fuze_connections/fuse_" + str(i) + ".cfg"
26 | fuse_file = open(fuse_cfg_file, "w")
27 | fuse_file.write("accountName " + cfg["accountName"] + "\n")
28 | fuse_file.write("accountKey " + cfg["accountKey"] + "\n")
29 | fuse_file.write("containerName " + cfg["containerName"] + "\n")
30 | fuse_file.close()
31 |
32 | base_dir = os.path.basename(cfg["mappedDirectory"])
33 | resource_dir = "/mnt/resource/blobfusetmp/" + base_dir
34 |
35 | os.makedirs(resource_dir)
36 | os.chmod(fuse_cfg_file, 0o700)
37 | os.makedirs(cfg["mappedDirectory"])
38 |
39 | fuze_mount_cmd = "blobfuse " + cfg["mappedDirectory"] + " --tmp-path=" + resource_dir + " --config-file=" + fuse_cfg_file + " -o attr_timeout=240" + " -o entry_timeout=240" + " -o negative_timeout=120"
40 | execute_bash(fuze_mount_cmd)
41 |
42 | except:
43 | print("Unexpected error during blob mounting")
44 | raise
--------------------------------------------------------------------------------
/Containers/common/sas_blob.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | import uuid
4 | import io
5 | from datetime import datetime, timedelta
6 | from urllib.parse import urlsplit, urlparse
7 |
8 | from azure.identity import ClientSecretCredential
9 | from azure.storage.blob import BlobServiceClient, generate_container_sas, ContainerSasPermissions, ContainerClient, BlobClient
10 |
11 | class SasBlob:
12 | def _get_resource_reference(self, prefix):
13 | return '{}{}'.format(prefix, str(uuid.uuid4()).replace('-', ''))
14 |
15 | def get_container_from_uri(self, sas_uri):
16 | url_parts = urlsplit(sas_uri)
17 |
18 | raw_path = url_parts.path[1:]
19 | container = raw_path.split('/')[0]
20 |
21 | return container
22 |
23 | def get_blob_from_uri(self, sas_uri):
24 | url_parts = urlsplit(sas_uri)
25 |
26 | raw_path = url_parts.path[1:]
27 | blob = raw_path.split('/')[1]
28 |
29 | return blob
30 |
31 | def get_sas_key_from_uri(self, sas_uri):
32 | url_parts = urlsplit(sas_uri)
33 | return url_parts.query
34 |
35 | def get_account_from_uri(self, sas_uri):
36 | url_parts = urlsplit(sas_uri)
37 | loc = url_parts.netloc
38 | return loc.split(".")[0]
39 |
40 | def delete_container(self, account_name, account_key, container_name):
41 | account_url = "https://{}.blob.core.windows.net".format(account_name)
42 |
43 | blob_service_client = BlobServiceClient(account_url=account_url, credential=account_key)
44 | blob_service_client.delete_container(container_name)
45 |
46 | def create_writable_container_sas(self, account_name, account_key, container_name, access_duration_hrs):
47 | account_url = "https://{}.blob.core.windows.net".format(account_name)
48 |
49 | blob_service_client = BlobServiceClient(account_url=account_url, credential=account_key)
50 | container_client = blob_service_client.create_container(container_name)
51 |
52 | sas_permissions = ContainerSasPermissions(read=True, write=True, delete=False, list=True)
53 |
54 | expiration = datetime.utcnow() + timedelta(hours=access_duration_hrs)
55 |
56 | sas_token = generate_container_sas(
57 | account_name,
58 | container_name,
59 | account_key=account_key,
60 | permission=sas_permissions,
61 | expiry=expiration
62 | )
63 |
64 | return '{}/{}?{}'.format(account_url, container_name, sas_token)
65 |
66 | def write_blob_from_bytes(self, container_sas_uri, blob_name, input_bytes):
67 | container_client = ContainerClient.from_container_url(container_sas_uri)
68 | blob_client = container_client.get_blob_client(blob_name)
69 |
70 | blob_client.upload_blob(input_bytes, overwrite=True)
71 |
72 | account_name = self.get_account_from_uri(container_sas_uri)
73 | container_name = self.get_container_from_uri(container_sas_uri)
74 | sas_key = self.get_sas_key_from_uri(container_sas_uri)
75 | return 'https://{}.blob.core.windows.net/{}/{}?{}'.format(account_name, container_name, blob_name, sas_key)
76 |
77 | def write_blob_from_text(self, container_sas_uri, blob_name, text):
78 | container_client = ContainerClient.from_container_url(container_sas_uri)
79 | blob_client = container_client.get_blob_client(blob_name)
80 | blob_client.upload_blob(text, overwrite=True)
81 |
82 | account_name = self.get_account_from_uri(container_sas_uri)
83 | container_name = self.get_container_from_uri(container_sas_uri)
84 | sas_key = self.get_sas_key_from_uri(container_sas_uri)
85 | return 'https://{}.blob.core.windows.net/{}/{}?{}'.format(account_name, container_name, blob_name, sas_key)
86 |
87 | def write_blob(self, container_sas_uri, blob_name, input_stream):
88 | container_client = ContainerClient.from_container_url(container_sas_uri)
89 | blob_client = container_client.get_blob_client(blob_name)
90 | blob_client.upload_blob(input_stream, overwrite=True)
91 |
92 | account_name = self.get_account_from_uri(container_sas_uri)
93 | container_name = self.get_container_from_uri(container_sas_uri)
94 | sas_key = self.get_sas_key_from_uri(container_sas_uri)
95 | return 'https://{}.blob.core.windows.net/{}/{}?{}'.format(account_name, container_name, blob_name, sas_key)
96 |
97 | def get_blob(self, sas_uri):
98 | blob_client = BlobClient.from_blob_url(sas_uri)
99 | download_stream = blob_client.download_blob()
100 |
101 | with io.BytesIO() as output_stream:
102 | download_stream.readinto(output_stream)
103 | return output_stream
104 |
105 | def save_local_text(self, sas_uri, local_file):
106 | blob_client = BlobClient.from_blob_url(sas_uri)
107 |
108 | with open(local_file, "w") as open_file:
109 | download_stream = blob_client.download_blob(encoding='UTF-8')
110 | open_file.write(download_stream.readall())
111 |
112 | def get_blob_sas_uri(self, container_sas_uri, blob_name):
113 | account_name = self.get_account_from_uri(container_sas_uri)
114 | container_name = self.get_container_from_uri(container_sas_uri)
115 | sas_key = self.get_sas_key_from_uri(container_sas_uri)
116 | return 'https://{}.blob.core.windows.net/{}/{}?{}'.format(account_name, container_name, blob_name, sas_key)
--------------------------------------------------------------------------------
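
A hedged sketch of the SAS flow in SasBlob: create the container and a writable SAS URI with the account key, then write blobs against that URI without further credentials. The account name, key, and container below are placeholders:

    from sas_blob import SasBlob

    sas = SasBlob()
    container_uri = sas.create_writable_container_sas(
        'mystorageaccount', '<account-key>', 'results-container', access_duration_hrs=24)

    blob_uri = sas.write_blob_from_text(container_uri, 'output/summary.txt', 'done')
    print(blob_uri)  # https://mystorageaccount.blob.core.windows.net/results-container/output/summary.txt?<sas>
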
/Documentation/landcover_api_spec_swagger.0.1.json:
--------------------------------------------------------------------------------
1 | {
2 | "swagger": "2.0",
3 | "info": {
4 | "title": "AI for Earth Landcover API",
5 | "version": "v0.1",
6 | "description": "This specification represents the core [AI for Earth](https://www.microsoft.com/en-us/aiforearth) API offering. An access key is required for access."
7 | },
8 | "host": "aiforearth.azure-api.net",
9 | "basePath": "/v0.1",
10 | "schemes": [
11 | "https"
12 | ],
13 | "securityDefinitions": {
14 | "apiKeyHeader": {
15 | "type": "apiKey",
16 | "name": "Ocp-Apim-Subscription-Key",
17 | "in": "header"
18 | },
19 | "apiKeyQuery": {
20 | "type": "apiKey",
21 | "name": "subscription-key",
22 | "in": "query"
23 | }
24 | },
25 | "security": [
26 | {
27 | "apiKeyHeader": []
28 | },
29 | {
30 | "apiKeyQuery": []
31 | }
32 | ],
33 | "paths": {
34 | "/landcover/classify": {
35 | "post": {
36 | "description": "This operation classifies the landcover for a given region based on the provided satellite image. The provided image must be a Tiff file with 4 bands representing the red, green, blue and near-infrared value of the pixel.\n\nA successful classification will return an image file corresponding to the landcover of the provided image. The following labels are possible with the corresponding color labels:\n- No Data - black (0, 0 ,0)\n- Water - blue (0, 0, 255)\n- Trees - dark green (0, 128, 0)\n- Herbaceous - light green (128, 255, 128)\n- Barren/Impervious - brown (128, 96, 96)",
37 | "operationId": "5ab5905bb8d61f0e48853404",
38 | "summary": "/landcover/classify",
39 | "parameters": [
40 | {
41 | "name": "type",
42 | "in": "query",
43 | "description": "File type of the returned image. Supported values are:\n- tiff (default)\n- jpeg",
44 | "type": "string",
45 | "default": "tiff",
46 | "enum": [
47 | "tiff",
48 | "jpeg"
49 | ]
50 | },
51 | {
52 | "name": "Content-Type",
53 | "in": "header",
54 | "description": "Media type of the request body. Currently only image/tiff is supported.",
55 | "required": true,
56 | "type": "string",
57 | "enum": [
58 | "image/tiff"
59 | ]
60 | }
61 | ],
62 | "responses": {
63 | "200": {
64 | "description": "The response body will contain an image file with the land cover labels. The image will be colored corresponding the the following labels:\n- No Data - black (0, 0 ,0)\n- Water - blue (0, 0, 255)\n- Trees - dark green (0, 128, 0)\n- Herbaceous - light green (128, 255, 128)\n- Barren/Impervious - brown (128, 96, 96)\n\nThe size of the output image will be the same as a minus a 64 pixel border around the image. For example, if the input image is 256 pixels by 256 pixels the output image will be 128 pixels by 128 pixels.",
65 | "examples": {
66 | "image/jpeg": "[binary image data]",
67 | "image/tiff": "[binary image data]"
68 | }
69 | },
70 | "400": {
71 | "description": "Possible Errors: \n\n- InvalidImageFormat\n
Input data is not a valid image. \n- InvalidImageSize\n
Input image is too large or too small. \n
"
72 | },
73 | "415": {
74 | "description": "Unsupported media type in the request body. Currently only image/tiff is supported"
75 | }
76 | },
77 | "produces": [
78 | "image/jpeg",
79 | "image/tiff",
80 | "application/json"
81 | ]
82 | }
83 | },
84 | "/landcover/details": {
85 | "post": {
86 | "description": "This operation classifies the landcover for a given region based on the provided satellite image. The response will contain an image file with the classification along with details about the breakdown of each label in the image.\n\n The provided image must be a Tiff file with 4 bands representing the red, green, blue and near-infrared value of the pixel.\n\nA successful classification will return an image file corresponding to the landcover of the provided image. The following labels are possible with the corresponding color labels:\n- No Data - black (0, 0 ,0)\n- Water - blue (0, 0, 255)\n- Trees - dark green (0, 128, 0)\n- Herbaceous - light green (128, 255, 128)\n- Barren/Impervious - brown (128, 96, 96)\n\nThe label breakdown section contains each label that appears in the image, along with the percentage of pixels that are classified with that label.",
87 | "operationId": "5ada78aab225207e719fa59b",
88 | "summary": "/landcover/details",
89 | "parameters": [
90 | {
91 | "name": "type",
92 | "in": "query",
93 | "description": "File type of the returned image. Supported values are:\n- tiff (default)\n- jpeg\n",
94 | "type": "string",
95 | "default": "tiff",
96 | "enum": [
97 | "tiff",
98 | "jpeg"
99 | ]
100 | },
101 | {
102 | "name": "Content-Type",
103 | "in": "header",
104 | "description": "Media type of the request body. Currently only image/tiff is supported.\n",
105 | "required": true,
106 | "type": "string",
107 | "enum": [
108 | "image/tiff"
109 | ]
110 | }
111 | ],
112 | "responses": {
113 | "200": {
114 | "description": "The response body will contain an image file with the land cover labels along with a dictionary containing all the labels containing a given image and the percent of the image predicted to contain that label. The possible labels are listed below.\n- No Data - black (0, 0 ,0)\n- Water - blue (0, 0, 255)\n- Trees - dark green (0, 128, 0)\n- Herbaceous - light green (128, 255, 128)\n- Barren/Impervious - brown (128, 96, 96)\n\nThe size of the output image will be the same as a minus a 64 pixel border around the image. For example, if the input image is 256 pixels by 256 pixels the output image will be 128 pixels by 128 pixels.",
115 | "examples": {
116 | "application/json": {
117 | "image_data": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAEAAQADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDiqKKK+aPkAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigD//Z",
118 | "label_breakdown": {
119 | "No Data": 0,
120 | "Trees": 1,
121 | "Water": 0
122 | }
123 | }
124 | }
125 | },
126 | "400": {
127 | "description": "Possible Errors: \r\n\r\n- EmptyImage\r\n
An image was not supplied.\r\n \r\n- InvalidImageFormat\r\n
Input data is not a valid image. \r\n- InvalidImageSize\r\n
Input image is too large or too small. \r\n
"
128 | },
129 | "415": {
130 | "description": "Unsupported media type in the request body. Currently only image/tiff is supported"
131 | }
132 | },
133 | "produces": [
134 | "application/json"
135 | ]
136 | }
137 | }
138 | },
139 | "definitions": {
140 | "GeoTile": {
141 | "type": "object",
142 | "required": [
143 | "lon",
144 | "lat"
145 | ],
146 | "properties": {
147 | "lon": {
148 | "type": "number"
149 | },
150 | "lat": {
151 | "type": "number"
152 | }
153 | }
154 | },
155 | "GeoImage": {
156 | "type": "object",
157 | "required": [
158 | "lon",
159 | "lat",
160 | "location",
161 | "model",
162 | "classification",
163 | "options"
164 | ],
165 | "properties": {
166 | "lon": {
167 | "type": "number"
168 | },
169 | "lat": {
170 | "type": "number"
171 | },
172 | "location": {
173 | "type": "string"
174 | },
175 | "model": {
176 | "type": "string"
177 | },
178 | "classification": {
179 | "type": "string"
180 | },
181 | "options": {
182 | "type": "array",
183 | "items": {
184 | "type": "string"
185 | }
186 | }
187 | }
188 | },
189 | "CacheRequest": {
190 | "type": "object",
191 | "required": [
192 | "lon",
193 | "lat",
194 | "radius",
195 | "tiles"
196 | ],
197 | "properties": {
198 | "lon": {
199 | "type": "number"
200 | },
201 | "lat": {
202 | "type": "number"
203 | },
204 | "radius": {
205 | "type": "number"
206 | },
207 | "model": {
208 | "type": "string"
209 | },
210 | "options": {
211 | "type": "array",
212 | "items": {
213 | "type": "string"
214 | }
215 | },
216 | "tiles": {
217 | "type": "array",
218 | "items": {
219 | "$ref": "#/definitions/GeoTile"
220 | }
221 | }
222 | }
223 | },
224 | "Cache": {
225 | "type": "object",
226 | "properties": {
227 | "available": {
228 | "type": "array",
229 | "items": {
230 | "$ref": "#/definitions/GeoImage"
231 | }
232 | },
233 | "missing": {
234 | "type": "array",
235 | "items": {
236 | "$ref": "#/definitions/GeoImage"
237 | }
238 | }
239 | }
240 | },
241 | "Body": {
242 | "example": "[Binary image data]"
243 | }
244 | }
245 | }
--------------------------------------------------------------------------------
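
The specification above can be exercised with any HTTP client. A sketch using Python requests against /landcover/classify; the subscription key and input file are placeholders, and a valid access key is required as noted in the description:

    import requests

    with open('tile.tif', 'rb') as f:
        resp = requests.post(
            'https://aiforearth.azure-api.net/v0.1/landcover/classify',
            params={'type': 'jpeg'},
            headers={'Content-Type': 'image/tiff',
                     'Ocp-Apim-Subscription-Key': '<your-key>'},
            data=f.read())

    resp.raise_for_status()
    with open('landcover.jpg', 'wb') as out:
        out.write(resp.content)
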
/Examples/base-py/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/aiforearth/base-py:1.13
2 |
3 | # Example of how to install api-required packages.
4 | #RUN pip3 install \
5 | # numpy \
6 | # pandas \
7 | # matplotlib
8 |
9 | # Copy your API code
10 | COPY . /app/my_api/
11 | COPY ./supervisord.conf /etc/supervisord.conf
12 |
13 | # startup.sh is a helper script
14 | COPY ./startup.sh /
15 | RUN chmod +x /startup.sh
16 |
17 | # Enter your Application Insights instrumentation key to enable Azure monitoring. You can find the key here:
18 | # https://docs.microsoft.com/en-us/azure/azure-monitor/app/create-new-resource#copy-the-instrumentation-key
19 | ENV APPINSIGHTS_INSTRUMENTATIONKEY= \
20 | TRACE_SAMPLING_RATE=1.0
21 |
22 | # The following variables will allow you to filter logs in AppInsights
23 | ENV SERVICE_OWNER=AI4E_Test \
24 | SERVICE_CLUSTER=Local\ Docker \
25 | SERVICE_MODEL_NAME=base-py\ example \
26 | SERVICE_MODEL_FRAMEWORK=Python \
27 | SERVICE_MODEL_FRAMEOWRK_VERSION=3.7 \
28 | SERVICE_MODEL_VERSION=1.0 \
29 | DISABLE_CURRENT_REQUEST_METRIC=False
30 |
31 | # This is the prefix for your API.
32 | # In this example, the default_post function has api_path set to '/example', so the URL will be:
33 | # /v1/my_api/tasker/example (API_PREFIX + api_path)
34 | # The supervisord.conf file sets the webserver port to 1212, so if you run this locally, the complete URL will be:
35 | # http://localhost:1212/v1/my_api/tasker/example
36 | ENV API_PREFIX=/v1/my_api/tasker
37 |
38 | ENV PYTHONPATH="${PYTHONPATH}:/app/my_api/"
39 | ENV PYTHONUNBUFFERED=TRUE
40 |
41 | # Expose the port that is to be used when calling your API
42 | EXPOSE 1212
43 |
44 | HEALTHCHECK --interval=1m --timeout=3s --start-period=20s \
45 | CMD curl -f http://localhost:1212/${API_PREFIX}/ || exit 1
46 | ENTRYPOINT [ "/startup.sh" ]
47 | # Comment out the line above and uncomment the following line to debug and see output messages.
48 | #ENTRYPOINT ["gunicorn", "-b", "0.0.0.0:1212", "runserver:app"]
--------------------------------------------------------------------------------
/Examples/base-py/runserver.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | # /ai4e_api_tools has been added to the PYTHONPATH, so we can reference those libraries directly.
4 | from time import sleep
5 | import json
6 | from flask import Flask, request, abort
7 | from ai4e_app_insights_wrapper import AI4EAppInsights
8 | from ai4e_service import APIService
9 |
10 | print('Creating Application')
11 | app = Flask(__name__)
12 |
13 | # Use the AI4EAppInsights library to send log messages. NOT REQUIRED
14 | log = AI4EAppInsights()
15 |
16 | # Use the APIService to execute your functions within a logging trace. It supports long-running/async functions,
17 | # handles SIGTERM signals from AKS, and limits the number of concurrent requests.
18 | with app.app_context():
19 | ai4e_service = APIService(app, log)
20 |
21 | # Define a function for processing request data, if applicable. This function loads data or files into
22 | # a dictionary for access in your API function. We pass this function as a parameter to your API setup.
23 | def process_request_data(request):
24 | return_values = {'data': None}
25 | try:
26 | # Attempt to load the body
27 | return_values['data'] = request.get_json()
28 | except:
29 | log.log_error('Unable to load the request data') # Log to Application Insights
30 | return return_values
31 |
32 | # Define a function that runs your model. This could be in a library.
33 | def run_model(taskId, body):
34 | # Update the task status, so the caller knows it has been accepted and is running.
35 | ai4e_service.api_task_manager.UpdateTaskStatus(taskId, 'running model')
36 |
37 | log.log_debug('Running model', taskId) # Log to Application Insights
38 | #INSERT_YOUR_MODEL_CALL_HERE
39 | sleep(10) # replace with real code
40 |
41 | # POST, long-running/async API endpoint example
42 | @ai4e_service.api_async_func(
43 | api_path = '/example',
44 | methods = ['POST'],
45 | request_processing_function = process_request_data, # This is the data process function that you created above.
46 |     maximum_concurrent_requests = 3, # If the number of requests exceeds this limit, a 503 is returned to the caller.
47 | content_types = ['application/json'],
48 | content_max_length = 1000, # In bytes
49 | trace_name = 'post:my_long_running_funct')
50 | def default_post(*args, **kwargs):
51 | # Since this is an async function, we need to keep the task updated.
52 | taskId = kwargs.get('taskId')
53 | log.log_debug('Started task', taskId) # Log to Application Insights
54 |
55 | # Get the data from the dictionary key that you assigned in your process_request_data function.
56 | request_json = kwargs.get('data')
57 |
58 | if not request_json:
59 | ai4e_service.api_task_manager.FailTask(taskId, 'Task failed - Body was empty or could not be parsed.')
60 | return -1
61 |
62 | # Run your model function
63 | run_model(taskId, request_json)
64 |
65 | # Once complete, ensure the status is updated.
66 | log.log_debug('Completed task', taskId) # Log to Application Insights
67 | # Update the task with a completion event.
68 | ai4e_service.api_task_manager.CompleteTask(taskId, 'completed')
69 |
70 | # GET, sync API endpoint example
71 | @ai4e_service.api_sync_func(api_path = '/echo/', methods = ['GET'], maximum_concurrent_requests = 1000, trace_name = 'get:echo', kwargs = {'text'})
72 | def echo(*args, **kwargs):
73 | return 'Echo: ' + kwargs['text']
74 |
75 | if __name__ == '__main__':
76 | app.run()
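77 | 
78 | # --- Illustrative client sketch (not used by the service; safe to remove) ---
79 | # A minimal example of calling the async endpoint above, assuming the container is
80 | # running locally on port 1212 (see supervisord.conf) and that the `requests`
81 | # package is installed on the calling machine. The JSON body below is arbitrary,
82 | # since run_model() only sleeps; replace it with whatever your model expects.
83 | def _example_client():
84 |     import requests
85 |     url = 'http://localhost:1212/v1/my_api/tasker/example'
86 |     r = requests.post(url, json={'example_key': 'example_value'})
87 |     # The response contains the created task's information, including its taskId.
88 |     print(r.status_code, r.text)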
--------------------------------------------------------------------------------
/Examples/base-py/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | /usr/bin/supervisord
--------------------------------------------------------------------------------
/Examples/base-py/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | nodaemon=true
3 |
4 | [program:gunicorn]
5 | directory=/app/my_api/
6 | command=gunicorn -b 0.0.0.0:1212 --workers 4 runserver:app
7 | stdout_logfile=/dev/stdout
8 | stdout_logfile_maxbytes=0
9 | stderr_logfile=/dev/stdout
10 | stderr_logfile_maxbytes=0
11 |
--------------------------------------------------------------------------------
/Examples/base-r/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/aiforearth/base-r:1.6
2 |
3 | # Install some additional R packages
4 | #RUN R -e 'install.packages("raster"); library(raster)'
5 | RUN R -e 'print(installed.packages());'
6 |
7 | # Note: supervisord.conf reflects the location and name of your api code.
8 | # If the default (./my_api/plumber_run.R) is renamed, you must change supervisord.conf
9 | # Copy your API code
10 | RUN mkdir -p /app/my_api/
11 | WORKDIR /app/my_api/
12 | COPY ./my_api/ /app/my_api/
13 | COPY ./supervisord.conf /etc/supervisord.conf
14 |
15 | # startup.sh is a helper script
16 | COPY ./startup.sh /
17 | RUN chmod +x /startup.sh
18 |
19 | # Application Insights keys and trace configuration
20 | ENV APPINSIGHTS_INSTRUMENTATIONKEY= \
21 | APPINSIGHTS_LIVEMETRICSSTREAMAUTHENTICATIONAPIKEY=
22 |
23 | # The following variables will allow you to filter logs in AppInsights
24 | ENV SERVICE_OWNER=AI4E_Test \
25 | SERVICE_CLUSTER=Local\ Docker \
26 | SERVICE_MODEL_NAME=base-R\ example \
27 | SERVICE_MODEL_FRAMEWORK=R \
28 | SERVICE_MODEL_FRAMEOWRK_VERSION=microsoft-r-open-3.5.1 \
29 | SERVICE_MODEL_VERSION=1.0
30 |
31 | ENV STORAGE_ACCOUNT_NAME ""
32 | ENV STORAGE_ACCOUNT_KEY ""
33 |
34 | # Expose the port that is to be used when calling your API
35 | EXPOSE 80
36 | HEALTHCHECK --interval=1m --timeout=3s --start-period=20s \
37 | CMD curl -f http://localhost/ || exit 1
38 | ENTRYPOINT [ "/startup.sh" ]
39 |
40 | # Replace the above entrypoint with the following one for faster debugging.
41 | #ENTRYPOINT ["R", "-e", "pr <- plumber::plumb(commandArgs()[4]); pr$run(host='0.0.0.0', port=80)"]
42 | #CMD ["/app/my_api/api_example.R"]
43 |
--------------------------------------------------------------------------------
/Examples/base-r/my_api/Observations.csv:
--------------------------------------------------------------------------------
1 | Heading1,Heading2
2 | a,d
3 | b,e
4 | c,f
--------------------------------------------------------------------------------
/Examples/base-r/my_api/api_example.R:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | library(future)
4 | plan(multiprocess)
5 | library(reticulate)
6 | library(jsonlite)
7 | use_python('/usr/bin/python3', required = TRUE)
8 | source_python("/ai4e_api_tools/sas_blob.py")
9 | source("/ai4e_api_tools/task_management/api_task.R")
10 | source("/ai4e_api_tools/ai4e_app_insights.R")
11 |
12 | write.table(paste0(FALSE), file = "running.txt")
13 |
14 | STORAGE_ACCOUNT_NAME <- Sys.getenv("STORAGE_ACCOUNT_NAME")
15 | STORAGE_ACCOUNT_KEY <- Sys.getenv("STORAGE_ACCOUNT_KEY")
16 |
17 | # Helper function to write dataframes to csv
18 | WriteBlob <- function(dataframe_to_write, container_uri, blob_name, include_row_names) {
19 | # Create a temp file stream to write the data.
20 | tmp <- file(tempfile())
21 | open(tmp, "w+")
22 | write.csv(dataframe_to_write, file=tmp, row.names = include_row_names, append=FALSE, fileEncoding="UTF-8")
23 |
24 | # Read the data to save to the blob.
25 | seek(tmp, where=0)
26 | data_to_save <- paste(readLines(tmp, n=-1), collapse="\n")
27 |
28 | # Upload the data to a blob using the AI for Earth sas_blob helper library.
29 | sas_blob_helper = SasBlob()
30 | sas_blob_helper$write_blob_from_text(container_uri, blob_name, data_to_save)
31 | close(tmp)
32 | }
33 |
34 | # Primary working function
35 | ProcessData<-function(taskId, user_data){
36 | tryCatch({
37 | # Update task status at any time to let users know how your API is progressing.
38 | UpdateTaskStatus(taskId, 'running')
39 |
40 | # Get input data.
41 | container_name <- user_data$container_name
42 | run_id <- user_data$run_id
43 |
44 | # For this example, create a SAS-based writable container to use.
45 | access_duration_hrs <- 1
46 | sas_blob_helper <- SasBlob()
47 | container_sas_uri <- sas_blob_helper$create_writable_container_sas(STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY, container_name, access_duration_hrs)
48 | print(paste("Container uri: ", container_sas_uri))
49 |
50 | observation_data_file <- "/app/my_api/Observations.csv"
51 | observation_data <- read.csv(observation_data_file)
52 | blob_name <- "Observations.csv"
53 |
54 | blob_sas_uri <- sas_blob_helper$write_blob_from_text(container_sas_uri, blob_name, observation_data)
55 | print(paste("Blob uri: ", blob_sas_uri))
56 |
57 | #INSERT_YOUR_MODEL_CALL_HERE
58 |
59 |         # Download the Observations.csv from Azure Blob Storage and read it into observations.
60 | local_file <- "./local_observation.csv"
61 | observations_csv <- sas_blob_helper$save_local_text(blob_sas_uri, local_file)
62 | observations <- read.csv(local_file)
63 |
64 | # Write the observations output data to the output_dir/output_name.csv Azure Blob.
65 | #dir = WriteBlob(observations, container_sas_uri, paste(run_id, "output_dir/output_name.csv", sep= "/"), include_row_names=FALSE)
66 |
67 | # Update the task to let the caller know their request has been completed.
68 | UpdateTaskStatus(taskId, 'completed')
69 | }, error = function(err) {
70 | print(paste0(err))
71 | log_exception(paste0(err), taskId)
72 | UpdateTaskStatus(taskId, paste("failed - ", err))
73 | write.table(paste0(FALSE), file = "running.txt")
74 | })
75 | }
76 |
77 | #* Test process
78 | #* @post /test
79 | function(req, res){
80 | print("running")
81 |
82 | task <- AddTask(req)
83 | taskId <- task$uuid
84 | sas_blob_helper = SasBlob()
85 |
86 | is_processing <- read.table("running.txt")
87 |
88 |     # R is single-threaded, so we only process one request at a time.
89 | # Parallel requests are handled by AKS auto-scaling.
90 | if (is_processing == "TRUE")
91 | {
92 | log_warn("Too many requests are being processed.", taskId)
93 |         res$status <- 429 # Too many requests
94 | res$body <- "Too many requests are being processed. Retry with a backoff."
95 | return(res)
96 | }
97 |
98 | write.table(paste0(TRUE), file = "running.txt")
99 |
100 | tryCatch({
101 | # Get the request body data and store into input_data.
102 | body <- req$postBody
103 | input_data <- fromJSON(body, simplifyDataFrame=TRUE)
104 | directory <- input_data$run_id
105 |
106 |         # Run the model in a new "thread" so that we can return the taskId, which lets the caller request the status at any time.
107 | #promise <- future(ProcessData(taskId, input_data))
108 | # Comment the above line and uncomment the below line to debug. taskId will not be returned until completion.
109 | ProcessData(taskId, input_data)
110 | message <- paste0("Starting task: ", taskId, " Output files will be placed in ", input_data$run_id, " directory.")
111 |
112 | }, error = function(err) {
113 | print(paste0(err))
114 | log_exception(paste0(err), taskId)
115 | UpdateTaskStatus(taskId, paste("failed - ", err))
116 | res$status <- 400
117 | res$body <- "Bad request. Please ensure JSON request body is properly formatted."
118 | return(res)
119 | })
120 |
121 | data.frame(message, taskId, directory)
122 | }
123 |
124 | #* Get status of task by id
125 | #* @param taskId The id of the task
126 | #* @get /task/
127 | GetProcessDataTaskStatus<-function(taskId){
128 | status <- GetTaskStatus(taskId)
129 | return(status)
130 | }
131 |
132 | #* Provide healthcheck endpoint
133 | #* @get /
134 | GetHealthCheck<-function(){
135 | return("OK")
136 | }
137 |
138 | # Please keep an empty last line at the end of this file; otherwise, you will see an error when starting the web server
139 |
--------------------------------------------------------------------------------
/Examples/base-r/my_api/plumber_run.R:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | library(plumber)
4 | r <- plumb("/app/my_api/api_example.R")
5 | r$run(port=80, host="0.0.0.0")
--------------------------------------------------------------------------------
/Examples/base-r/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | /usr/bin/supervisord
--------------------------------------------------------------------------------
/Examples/base-r/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | nodaemon=true
3 |
4 | [program:plumber]
5 | command=/usr/bin/R CMD BATCH /app/my_api/plumber_run.R
6 | stdout_logfile=/dev/stdout
7 | stdout_logfile_maxbytes=0
8 | stderr_logfile=/dev/stdout
9 | stderr_logfile_maxbytes=0
--------------------------------------------------------------------------------
/Examples/blob-mount-py/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/aiforearth/blob-py:1.3
2 |
3 | # Copy blob connection information
4 | COPY ./blob_mount.json /app/fuse/blob_mount.json
5 |
6 | # Note: supervisord.conf reflects the location and name of your api code.
7 | # If the default (./my_api/runserver.py) is renamed, you must change supervisord.conf
8 | # Copy API code
9 | COPY ./my_api /app/my_api/
10 | COPY ./supervisord.conf /etc/supervisord.conf
11 |
12 | # startup.sh is a helper script
13 | COPY ./startup.sh /
14 | RUN chmod +x /startup.sh
15 |
16 | ENV API_PREFIX=/v1/blob
17 |
18 | # Expose the port that is to be used when calling your API
19 | EXPOSE 1212
20 | ENTRYPOINT [ "/startup.sh" ]
--------------------------------------------------------------------------------
/Examples/blob-mount-py/README.md:
--------------------------------------------------------------------------------
1 | ## blob-mount-py example
2 | This example demonstrates how to mount an Azure Blob Storage container as a local virtual file system. It uses [Azure Storage Fuse](https://github.com/Azure/azure-storage-fuse) to perform the mounting.
3 |
4 | ### Running
5 | 1. To get started, you will need to [create a new Azure Blob Container](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-portal) with a file named `config.csv`. We recommend using [Azure Storage Explorer](https://azure.microsoft.com/en-us/features/storage-explorer/) to aid in storage upload/download.
6 | 2. From within the Azure Portal or within Azure Storage Explorer, copy your blob's storage key.
7 | 3. Modify the [blob_mount.json](./blob_mount.json) file as follows:
8 | - accountName: This is the name of your blob storage account.
9 | - accountKey: This is the key that you copied in step 2.
10 | - containerName: This is the name of the container that you created in step 1. It is the container that will be mounted locally.
11 | - mappedDirectory: This is the local path where your container will be mounted.
12 |
13 | Note: You may map as many containers as you would like in this file. The blob mounter will mount all of them.
14 | 4. Build your container with: `docker build .`. The final output line will state `Successfully built `.
15 | 5. Run your example: `docker run -p 1212:1212 --cap-add SYS_ADMIN --device /dev/fuse `.
16 |
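17 | ### Calling the example endpoint
18 | The snippet below is an illustrative sketch of how to call the example endpoint once the container is running. The URL is `API_PREFIX` (`/v1/blob`, set in the Dockerfile) plus the `api_path` (`/example`, set in `my_api/runserver.py`) on port 1212. It assumes the `requests` package is installed on the calling machine and that `runserver.py` has been pointed at a file that exists in your mounted container (such as the `config.csv` from step 1).
19 | 
20 | ```
21 | import requests
22 | 
23 | # POST with an empty body; the endpoint simply reads a file from the mounted blob container.
24 | r = requests.post('http://localhost:1212/v1/blob/example')
25 | print(r.status_code)
26 | print(r.text)  # "Blob file contents: ..." on success
27 | ```
28 | 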
--------------------------------------------------------------------------------
/Examples/blob-mount-py/blob_mount.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "accountName":"your_azure_storage_account_name",
4 | "accountKey":"your_azure_storage_account_key",
5 | "containerName":"input",
6 | "mappedDirectory":"/mnt/input"
7 | }
8 | ]
--------------------------------------------------------------------------------
/Examples/blob-mount-py/my_api/runserver.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation. All rights reserved.
2 | # Licensed under the MIT License.
3 | # /ai4e_api_tools has been added to the PYTHONPATH, so we can reference those
4 | # libraries directly.
5 | from flask import Flask, request
6 | from io import BytesIO
7 | import json
8 | from ai4e_app_insights_wrapper import AI4EAppInsights
9 | from ai4e_service import APIService
10 | import sys
11 | import os
12 | from os import getenv
13 |
14 | print("Creating Application")
15 |
16 | app = Flask(__name__)
17 | blob_mapped_dir = "/mnt/input"
18 |
19 | # Use the AI4EAppInsights library to send log messages.
20 | log = AI4EAppInsights()
21 |
22 | # Use the APIService to execute your functions within a logging trace. It supports long-running/async functions,
23 | # handles SIGTERM signals from AKS, etc., and handles concurrent requests.
24 | with app.app_context():
25 | ai4e_service = APIService(app, log)
26 |
27 | # Define a function for processing request data, if applicable. This function loads data or files into
28 | # a dictionary for access in your API function. We pass this function as a parameter to your API setup.
29 | def process_request_data(request):
30 | return_values = {'image_bytes': None}
31 | try:
32 | # Attempt to load the body
33 | return_values['image_bytes'] = BytesIO(request.data)
34 | except:
35 | log.log_error('Unable to load the request data') # Log to Application Insights
36 | return return_values
37 |
38 | # POST, sync API endpoint example
39 | @ai4e_service.api_sync_func(
40 | api_path = '/example',
41 | methods = ['POST'],
42 |     maximum_concurrent_requests = 10, # If the number of requests exceeds this limit, a 503 is returned to the caller.
43 | trace_name = 'post:read_blob_file')
44 | def post(*args, **kwargs):
45 |     # Read a file from the blob container that was mounted via blob_mount.json and
46 |     # return its contents. blob_mapped_dir matches the mappedDirectory value in
47 |     # blob_mount.json.
48 |     # Set filename to the name of a file that exists in your mounted container,
49 |     # e.g. the config.csv created in step 1 of the README.
50 | 
51 |     filename = ""
52 |     data_path = os.path.join(blob_mapped_dir, filename)
53 | with open(data_path, "r") as file_from_blob:
54 | return "Blob file contents: " + file_from_blob.read()
55 |
56 | if __name__ == '__main__':
57 | app.run()
--------------------------------------------------------------------------------
/Examples/blob-mount-py/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python /app/fuse/blob_mounter.py
3 | /usr/bin/supervisord
--------------------------------------------------------------------------------
/Examples/blob-mount-py/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | nodaemon=true
3 |
4 | [program:gunicorn]
5 | directory=/app/my_api/
6 | command=gunicorn -b 0.0.0.0:1212 --workers 1 runserver:app
7 | stdout_logfile=/dev/stdout
8 | stdout_logfile_maxbytes=0
9 | stderr_logfile=/dev/stdout
10 | stderr_logfile_maxbytes=0
11 |
--------------------------------------------------------------------------------
/Examples/helpers/aad_blob_helper.py:
--------------------------------------------------------------------------------
1 | from io import StringIO
2 | from os import getenv
3 | import tempfile
4 | from aad_blob import AadBlob
5 | import pandas as pd
6 |
7 | aad_blob_connector = AadBlob(
8 | getenv('AAD_TENANT_ID'),
9 | getenv('AAD_APPLICATION_ID'),
10 | getenv('AAD_APPLICATION_SECRET'),
11 | getenv('AAD_ACCOUNT_NAME'),
12 | getenv('LOCAL_BLOB_TEST_DIRECTORY', None))
13 |
14 | class BlobHelper:
15 | def __init__(self, container_name, run_directory):
16 | self.aad_blob_connector = aad_blob_connector
17 | self.container_name = container_name
18 | self.run_directory = run_directory
19 |
20 | def write_csv(self, data, filename, top_directory, path = None):
21 | if (path is None):
22 | blob_name = '{}/{}/{}'.format(self.run_directory, top_directory, filename)
23 | else:
24 | blob_name = '{}/{}/{}/{}'.format(self.run_directory, top_directory, path, filename)
25 |
26 | csv_str = data.to_csv(encoding='utf-8')
27 | self.aad_blob_connector.write_blob_from_text(self.container_name, blob_name, csv_str.encode())
28 |
29 | def write_png(self, data, filename, top_directory, path = None):
30 | temp, temp_path = tempfile.mkstemp()
31 |
32 | data.savefig(temp_path)
33 | data.close()
34 |
35 | if (path is None):
36 | blob_name = '{}/{}/{}'.format(self.run_directory, top_directory, filename)
37 | else:
38 | blob_name = '{}/{}/{}/{}'.format(self.run_directory, top_directory, path, filename)
39 |
40 | self.aad_blob_connector.create_blob_from_path(self.container_name, blob_name, temp_path)
41 |
42 | def get_csv(self, csv_filename, top_directory, path = None):
43 | blob_name = ''
44 | if (path is None):
45 | blob_name = '{}/{}/{}'.format(self.run_directory, top_directory, csv_filename)
46 | else:
47 | blob_name = '{}/{}/{}/{}'.format(self.run_directory, top_directory, path, csv_filename)
48 |
49 | try:
50 | blob_text = self.aad_blob_connector.get_blob_to_text(self.container_name, blob_name)
51 | except Exception as e:
52 | print('Exception in get_csv:')
53 | print(e)
54 | raise ValueError('Blob: {} not found.'.format(blob_name))
55 |
56 | static_data = pd.read_csv(StringIO(blob_text))
57 | return static_data
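58 | 
59 | # --- Illustrative usage sketch (not called anywhere; container and path names below are hypothetical) ---
60 | # Assumes the AAD_* environment variables read above point at a storage account
61 | # that the AAD application is allowed to access.
62 | def _example_usage():
63 |     helper = BlobHelper(container_name='results', run_directory='run-001')
64 |     df = pd.DataFrame({'species': ['a', 'b'], 'count': [1, 2]})
65 |     helper.write_csv(df, 'counts.csv', top_directory='tables')
66 |     round_trip = helper.get_csv('counts.csv', top_directory='tables')
67 |     print(round_trip.head())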
--------------------------------------------------------------------------------
/Examples/pytorch/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/aiforearth/base-py:latest
2 |
3 | RUN echo "source activate ai4e_py_api" >> ~/.bashrc \
4 |     && conda install -y -c conda-forge -n ai4e_py_api numpy pandas scipy \
5 | && conda install -y -c pytorch -n ai4e_py_api pytorch torchvision \
6 | && conda install -y -c kmdouglass -n ai4e_py_api tifffile
7 |
8 | # PIL will be installed with pytorch
9 |
10 | # Note: supervisord.conf reflects the location and name of your api code.
11 | # If the default (./pytorch_api/runserver.py) is renamed, you must change supervisord.conf
12 | # Copy your API code
13 | COPY ./pytorch_api /app/pytorch_api/
14 | COPY ./supervisord.conf /etc/supervisord.conf
15 |
16 | # startup.sh is a helper script
17 | COPY ./startup.sh /
18 | RUN chmod +x /startup.sh
19 |
20 | # Application Insights keys and trace configuration
21 | ENV APPINSIGHTS_INSTRUMENTATIONKEY= \
22 | TRACE_SAMPLING_RATE=1.0
23 |
24 | # The following variables will allow you to filter logs in AppInsights
25 | ENV SERVICE_OWNER=AI4E_PyTorch_Example \
26 | SERVICE_CLUSTER=Local\ Docker \
27 | SERVICE_MODEL_NAME=AI4E_PyTorch_Example \
28 | SERVICE_MODEL_FRAMEWORK=Python \
29 | SERVICE_MODEL_FRAMEOWRK_VERSION=3.6.6 \
30 | SERVICE_MODEL_VERSION=1.0
31 |
32 | ENV API_PREFIX=/v1/pytorch_api
33 |
34 | # Expose the port that is to be used when calling your API
35 | EXPOSE 80
36 | HEALTHCHECK --interval=1m --timeout=3s --start-period=20s \
37 |   CMD curl -f http://localhost${API_PREFIX}/ || exit 1
38 | ENTRYPOINT [ "/startup.sh" ]
39 |
--------------------------------------------------------------------------------
/Examples/pytorch/README.md:
--------------------------------------------------------------------------------
1 | # PyTorch example
2 |
3 | This example shows you how to deploy a PyTorch model via an AI for Earth container. In this example we use an image classification model trained on the iNaturalist 2018 dataset.
4 |
5 |
6 | ## Download the model
7 |
8 | You can download an Inception v3 model trained on the iNaturalist dataset from this [page](https://github.com/macaodha/inat_comp_2018), via the link in "we also provide a trained model that can be downloaded from _here_".
9 |
10 | Place the downloaded model file [iNat_2018_InceptionV3.pth.tar](http://vision.caltech.edu/~macaodha/inat2018/iNat_2018_InceptionV3.pth.tar) in the `pytorch_api` folder, which will be copied to the Docker container (see the `COPY` commands in `Dockerfile`). There are other ways of accessing a model, such as placing it in an Azure blob storage container (a unit of blob storage, not to be confused with Docker _containers_) and mounting that blob container.
11 |
12 | Copy the file `inception.py` from the [inat_comp_2018](https://github.com/macaodha/inat_comp_2018) page and place it in the `pytorch_api` folder. We instantiate an Inception model in `pytorch_api/runserver.py` and load its weights from the model file.
13 |
14 | ## Modify Dockerfile
15 |
16 | The `Dockerfile` in this example is a modified version of `base-py/Dockerfile`. The only modifications are the additional commands to install the `scipy` and `pytorch` packages.
17 |
18 |
19 | ## Modify `supervisord.conf`
20 | If you changed the name of the destination folder in the Dockerfile that your API folder is copied to (here we used `/app/pytorch_api/`), remember to modify the two places in `supervisord.conf` that use the location of the API folder.
21 |
22 |
23 | ## Example service
24 |
25 | This example API endpoint takes an input image, performs image classification on it, and returns a string indicating the most likely category (a numerical label corresponding to one of the 8142 species in the iNat dataset) determined by the classifier. You can look up which species each numerical label corresponds to in the file downloadable from the iNat 2018 GitHub page (see the first point in the Updates section).
26 |
27 | Build the docker image:
28 | ```
29 | docker build . -t pytorch_example:1
30 | ```
31 |
32 | Run image locally:
33 | ```
34 | docker run -p 8081:80 "pytorch_example:1"
35 | ```
36 |
37 | Run an instance of this image interactively and start bash to debug:
38 | ```
39 | docker run -it pytorch_example:1 /bin/bash
40 | ```
41 |
42 |
43 | ## Testing and calling the service
44 |
45 | When testing locally, the endpoint will be at
46 |
47 | ```
48 | http://localhost:8081/v1/pytorch_api/classify
49 | ```
50 |
51 | You can use a tool like Postman to test the endpoint:
52 |
53 | 
54 |
55 |
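56 | If you prefer to script the call instead of using Postman, the following is a minimal sketch. It assumes the container is running locally as above (port 8081), that the `requests` package is installed on the calling machine, and that `sample.jpg` is a placeholder path to any JPEG image you want to classify.
57 | 
58 | ```
59 | import requests
60 | 
61 | with open('sample.jpg', 'rb') as f:
62 |     r = requests.post(
63 |         'http://localhost:8081/v1/pytorch_api/classify',
64 |         data=f.read(),
65 |         headers={'Content-Type': 'image/jpeg'})
66 | 
67 | print(r.status_code)
68 | print(r.text)  # e.g. "Most likely category is 1234"
69 | ```
70 | 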
--------------------------------------------------------------------------------
/Examples/pytorch/pytorch_api/pytorch_classifier.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | from inception import Inception3
6 | from PIL import Image
7 |
8 | use_gpu = True
9 | dtype = torch.float32
10 |
11 | device = torch.device('cuda') if use_gpu and torch.cuda.is_available() else torch.device('cpu')
12 | print('Using device: ', device)
13 |
14 |
15 | def load_model(model_path, device=device):
16 | print('pytorch_classifier.py: Loading model...')
17 | num_classes = 8142
18 |
19 | checkpoint = torch.load(model_path, map_location=device)
20 |
21 | # reference: https://github.com/macaodha/inat_comp_2018/blob/master/train_inat.py
22 | model = Inception3(transform_input=True)
23 | model.fc = nn.Linear(2048, num_classes)
24 | model.aux_logits = False
25 |
26 | model.load_state_dict(checkpoint['state_dict'])
27 | model = model.to(device=device, dtype=dtype)
28 | model.eval() # set model to evaluation mode
29 | print('pytorch_classifier.py: model loaded.')
30 | return model
31 |
32 |
33 | def classify(model, image_bytes):
34 | img = Image.open(image_bytes)
35 |
36 | image_np = np.asarray(img, np.uint8)
37 |
38 | # swap color axis because numpy image is H x W x C, torch image is C X H X W
39 | image_np = image_np.transpose((2, 0, 1))
40 | image_np = image_np[:3, :, :] # Remove the alpha channel
41 | image_np = np.expand_dims(image_np, axis=0) # add a batch dimension
42 | img_input = torch.from_numpy(image_np).type(torch.float32).to(device=device, dtype=dtype)
43 |
44 | with torch.no_grad():
45 | scores = model(img_input)
46 |
47 | scores = scores.cpu().data.numpy()
48 | clss = np.argmax(scores[0])
49 | return 'Most likely category is {}'.format(str(clss))
50 |
--------------------------------------------------------------------------------
/Examples/pytorch/pytorch_api/runserver.py:
--------------------------------------------------------------------------------
1 | # /ai4e_api_tools has been added to the PYTHONPATH, so we can reference those
2 | # libraries directly.
3 | from flask import Flask, request, abort
4 | from flask_restful import Resource, Api
5 | from ai4e_app_insights import AppInsights
6 | from ai4e_app_insights_wrapper import AI4EAppInsights
7 | from ai4e_service import APIService
8 | from PIL import Image
9 | import pytorch_classifier
10 | from io import BytesIO
11 | from os import getenv
12 |
13 | print("Creating Application")
14 |
15 | ACCEPTED_CONTENT_TYPES = ['image/png', 'application/octet-stream', 'image/jpeg']
16 |
17 | app = Flask(__name__)
18 |
19 | # Use the AI4EAppInsights library to send log messages.
20 | log = AI4EAppInsights()
21 |
22 | # Use the APIService to execute your functions within a logging trace. It supports long-running/async functions,
23 | # handles SIGTERM signals from AKS, etc., and handles concurrent requests.
24 | with app.app_context():
25 | ai4e_service = APIService(app, log)
26 |
27 | # Load the model
28 | # The model was copied to this location when the container was built; see ../Dockerfile
29 | model_path = '/app/pytorch_api/iNat_2018_InceptionV3.pth.tar'
30 | model = pytorch_classifier.load_model(model_path)
31 |
32 | # Define a function for processing request data, if applicable. This function loads data or files into
33 | # a dictionary for access in your API function. We pass this function as a parameter to your API setup.
34 | def process_request_data(request):
35 | print('Processing data...')
36 | return_values = {'image_bytes': None}
37 | try:
38 | # Attempt to load the body
39 | return_values['image_bytes'] = BytesIO(request.data)
40 | except:
41 | log.log_error('Unable to load the request data') # Log to Application Insights
42 | return return_values
43 |
44 | # POST, sync API endpoint example
45 | @ai4e_service.api_sync_func(
46 | api_path = '/classify',
47 | methods = ['POST'],
48 | request_processing_function = process_request_data, # This is the data process function that you created above.
49 |     maximum_concurrent_requests = 5, # If the number of requests exceeds this limit, a 503 is returned to the caller.
50 | content_types = ACCEPTED_CONTENT_TYPES,
51 | content_max_length = 10000, # In bytes
52 | trace_name = 'post:classify')
53 | def post(*args, **kwargs):
54 | print('Post called')
55 | image_bytes = kwargs.get('image_bytes')
56 | clss = pytorch_classifier.classify(model, image_bytes)
57 | # in this example we simply return the numerical ID of the most likely category determined
58 | # by the model
59 | return clss
60 |
61 | if __name__ == '__main__':
62 | app.run()
63 |
--------------------------------------------------------------------------------
/Examples/pytorch/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | /usr/bin/supervisord
--------------------------------------------------------------------------------
/Examples/pytorch/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | nodaemon=true
3 |
4 | [program:uwsgi]
5 | directory=/app/pytorch_api/
6 | command=/usr/local/envs/ai4e_py_api/bin/uwsgi --virtualenv /usr/local/envs/ai4e_py_api --callable app --http 0.0.0.0:80 -b 32768 --wsgi-disable-file-wrapper --die-on-term --enable-threads --wsgi-file /app/pytorch_api/runserver.py --log-date="%%Y-%%m-%%d %%H:%%M:%%S" --logformat-strftime
7 | stdout_logfile=/dev/stdout
8 | stdout_logfile_maxbytes=0
9 | stderr_logfile=/dev/stdout
10 | stderr_logfile_maxbytes=0
--------------------------------------------------------------------------------
/Examples/screenshots/CustomVisionIterationID.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/CustomVisionIterationID.jpg
--------------------------------------------------------------------------------
/Examples/screenshots/CustomVisionSettings.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/CustomVisionSettings.jpg
--------------------------------------------------------------------------------
/Examples/screenshots/QuickstartResourceGroup.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/QuickstartResourceGroup.jpg
--------------------------------------------------------------------------------
/Examples/screenshots/api_key1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/api_key1.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/api_key2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/api_key2.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/app_insights1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/app_insights1.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/app_insights2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/app_insights2.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/app_insights3.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/app_insights3.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/blob1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/blob1.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/blob4.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/blob4.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/blob_key.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/blob_key.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/blob_upload.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/blob_upload.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/create_ACR-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/create_ACR-1.png
--------------------------------------------------------------------------------
/Examples/screenshots/create_ACR-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/create_ACR-2.png
--------------------------------------------------------------------------------
/Examples/screenshots/create_ACR-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/create_ACR-3.png
--------------------------------------------------------------------------------
/Examples/screenshots/postman_header_content_type.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/postman_header_content_type.png
--------------------------------------------------------------------------------
/Examples/screenshots/postman_json.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/postman_json.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/postman_pytorch1.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/postman_pytorch1.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/postman_pytorch2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/postman_pytorch2.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/postman_pytorch_api.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/postman_pytorch_api.png
--------------------------------------------------------------------------------
/Examples/screenshots/postman_tf_api.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/postman_tf_api.png
--------------------------------------------------------------------------------
/Examples/screenshots/postman_tf_async_api.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/postman_tf_async_api.png
--------------------------------------------------------------------------------
/Examples/screenshots/resource_group.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/resource_group.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/resource_group_3.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/resource_group_3.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/run_ACI-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/run_ACI-1.png
--------------------------------------------------------------------------------
/Examples/screenshots/startup_fix.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/startup_fix.PNG
--------------------------------------------------------------------------------
/Examples/screenshots/storage_explorer_tf_out.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Examples/screenshots/storage_explorer_tf_out.png
--------------------------------------------------------------------------------
/Examples/tensorflow/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/aiforearth/base-py:1.13-cuda-9.0
2 |
3 | RUN pip3 install tensorflow==1.12 pillow numpy pandas
4 |
5 | # Copy your API code
6 | COPY ./tf_iNat_api /app/tf_iNat_api/
7 | COPY ./supervisord.conf /etc/supervisord.conf
8 | # startup.sh is a helper script
9 | COPY ./startup.sh /
10 | RUN chmod +x /startup.sh
11 |
12 | # Application Insights keys and trace configuration
13 | ENV APPINSIGHTS_INSTRUMENTATIONKEY= \
14 | TRACE_SAMPLING_RATE=1.0
15 |
16 | # The following variables will allow you to filter logs in AppInsights
17 | ENV SERVICE_OWNER=AI4E_TensorFlow_Example \
18 |     SERVICE_CLUSTER=Local\ Docker \
19 |     SERVICE_MODEL_NAME=AI4E_TensorFlow_Example \
20 | SERVICE_MODEL_FRAMEWORK=Python \
21 | SERVICE_MODEL_FRAMEOWRK_VERSION=3.6.6 \
22 | SERVICE_MODEL_VERSION=1.0
23 |
24 | ENV API_PREFIX=/v1/tf_iNat_api
25 |
26 | ENV STORAGE_ACCOUNT_NAME= \
27 | STORAGE_ACCOUNT_KEY=
28 |
29 | ENV PYTHONPATH="${PYTHONPATH}:/app/tf_iNat_api/"
30 | ENV PYTHONUNBUFFERED=TRUE
31 |
32 | WORKDIR /app/tf_iNat_api
33 |
34 | # Expose the port that is to be used when calling your API
35 | EXPOSE 80
36 | HEALTHCHECK --interval=1m --timeout=3s --start-period=20s \
37 |   CMD curl -f http://localhost${API_PREFIX}/ || exit 1
38 | ENTRYPOINT [ "/startup.sh" ]
39 | 
40 | # Comment out the line above and uncomment the following entrypoint to debug issues.
41 | #ENTRYPOINT ["gunicorn", "-b", "0.0.0.0:80", "runserver:app"]
42 |
--------------------------------------------------------------------------------
/Examples/tensorflow/README.md:
--------------------------------------------------------------------------------
1 | # TensorFlow example
2 |
3 | This example shows you how to deploy a TensorFlow model via an AI for Earth container. In this example we use an object detection model trained on the iNaturalist 2018 dataset.
4 |
5 | In this example, the user will send an image to the API via a POST call. It is a long-running API, so a task ID will be returned when the endpoint is called. The API creates a SAS-keyed container within the API owner's Azure storage account. The SAS URL is returned to the caller via a status update.
6 |
7 | ## Download the model
8 |
9 | You can download one of the two models trained on the iNaturalist dataset from the [Tensorflow detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md), in the section _iNaturalist Species-trained models_. Download the resnet50 model (smaller) instead of resnet101 to reduce memory usage during testing. The file is about 0.5 GB in size. After you download and unzip the folder, find `frozen_inference_graph.pb` and move this file to the `tf_iNat_api` directory inside the current directory.
10 |
11 | In this example, we copy the entire directory `tf_iNat_api` to the Docker container (see the `COPY` commands in `Dockerfile`), but there are other ways of accessing a model, such as placing it in an Azure blob storage container (a unit of blob storage, not to be confused with Docker _containers_) and mounting that blob container.
12 |
13 | ## Modify Dockerfile
14 |
15 | The `Dockerfile` in this example is a modified version of `base-py/Dockerfile`. The only modifications are the additional commands to install the TensorFlow and Pillow packages.
16 |
17 |
18 | ## Modify `supervisord.conf`
19 | If you changed the name of the destination folder in the Dockerfile that your API folder is copied to (here we used `/app/tf_iNat_api/`), remember to modify the two places in `supervisord.conf` that use the location of the API folder.
20 |
21 |
22 | ## Download some sample images
23 | You can download sample images from the iNat 2018 dataset from this [site](https://docs.google.com/spreadsheets/d/1JHn6J_9HBYyN5kaVrH1qcc3VMyxOsV2II8BvSwufM54).
24 |
25 |
26 | ## Example service
27 |
28 | This example API endpoint takes an input image, performs object detection on it, renders the bounding boxes on the image (only if the confidence of a detected box is above 0.5, which is the `confidence_threshold` you can change in `tf_iNat_api/runserver.py`), and saves the annotated image to blob storage. This is to demonstrate how to handle image input and output. Realistically, you would probably return the coordinates of the bounding boxes and the predicted categories as JSON, rather than the rendered image.
29 |
30 | Build the docker image (you need to be in the `Examples/tensorflow` directory, where the `Dockerfile` is):
31 | ```
32 | docker build . -t tensorflow_example:1
33 | ```
34 |
35 | Run image locally:
36 | ```
37 | docker run -p 8081:80 "tensorflow_example:1"
38 | ```
39 |
40 | For this async API example, we save the resulting image from the long-running process to blob storage. You need to create a storage account with Blob Storage, and assign the storage account name and the key (secondary) to `STORAGE_ACCOUNT_NAME` and `STORAGE_ACCOUNT_KEY` inside the Dockerfile.
41 |
42 | Run an instance of this image interactively and start bash to debug:
43 | ```
44 | docker run -it tensorflow_example:1 /bin/bash
45 | ```
46 |
47 |
48 | ## Testing and calling the service
49 |
50 | When testing locally, the endpoint will be at
51 |
52 | ```
53 | http://localhost:8081/v1/tf_iNat_api/detect
54 | ```
55 |
56 | You can use a tool like Postman to test the endpoint:
57 |
58 | 
59 |
60 | In the _Body_ tab of Postman where you specify the body data to go with the POST request, you can upload the image you'd like to detect animals on as binary data under the _binary_ option. You also need to set the content type of the binary file to "image/jpeg" in the Headers tab, as follows:
61 |
62 |
63 | 
64 |
65 |
66 | If you own or have access to the storage account, you can view the output image with the labeled detection bounding boxes in blob storage using Azure Storage Explorer (screenshot below); otherwise, you can download it using the SAS URL that was returned to the caller via a subsequent status update call.
67 |
68 | 
69 |
70 |
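71 | If you prefer to script the call instead of using Postman, the following is a minimal sketch. It assumes the container is running locally as above (port 8081), that the `requests` package is installed on the calling machine, and that `sample.jpg` is a placeholder path to a JPEG image.
72 | 
73 | ```
74 | import requests
75 | 
76 | with open('sample.jpg', 'rb') as f:
77 |     r = requests.post(
78 |         'http://localhost:8081/v1/tf_iNat_api/detect',
79 |         data=f.read(),
80 |         headers={'Content-Type': 'image/jpeg'})
81 | 
82 | # This is an async endpoint, so the response contains task information (including a
83 | # task ID) rather than the detection result itself.
84 | print(r.status_code)
85 | print(r.text)
86 | ```
87 | 
88 | Use the returned task ID with the service's task status endpoint to poll until you see the `completed - output written to: <SAS URL>` status update, then retrieve `detect_output.jpg` from the container referenced by that SAS URL.
89 | 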
--------------------------------------------------------------------------------
/Examples/tensorflow/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | /usr/bin/supervisord
--------------------------------------------------------------------------------
/Examples/tensorflow/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | nodaemon=true
3 |
4 | [program:uwsgi]
5 | directory=/app/tf_iNat_api/
6 | command=/usr/local/envs/ai4e_py_api/bin/uwsgi --virtualenv /usr/local/envs/ai4e_py_api --callable app --http 0.0.0.0:80 -b 32768 --wsgi-disable-file-wrapper --die-on-term --enable-threads --wsgi-file /app/tf_iNat_api/runserver.py --log-date="%%Y-%%m-%%d %%H:%%M:%%S" --logformat-strftime
7 | stdout_logfile=/dev/stdout
8 | stdout_logfile_maxbytes=0
9 | stderr_logfile=/dev/stdout
10 | stderr_logfile_maxbytes=0
11 |
12 |
13 |
--------------------------------------------------------------------------------
/Examples/tensorflow/tf_iNat_api/runserver.py:
--------------------------------------------------------------------------------
1 | # /ai4e_api_tools has been added to the PYTHONPATH, so we can reference those
2 | # libraries directly.
3 | import json
4 | from flask import Flask, request, abort
5 | from ai4e_app_insights_wrapper import AI4EAppInsights
6 | from ai4e_service import APIService
7 | from sas_blob import SasBlob
8 | from PIL import Image
9 | import tf_detector
10 | from io import BytesIO
11 | from os import getenv
12 | import uuid
13 | import sys
14 | import numpy as np
15 |
16 | print("Creating Application")
17 |
18 | ACCEPTED_CONTENT_TYPES = ['image/png', 'application/octet-stream', 'image/jpeg']
19 | blob_access_duration_hrs = 1
20 |
21 | app = Flask(__name__)
22 |
23 | # Use the AI4EAppInsights library to send log messages.
24 | log = AI4EAppInsights()
25 |
26 | # Use the APIService to execute your functions within a logging trace. It supports long-running/async functions,
27 | # handles SIGTERM signals from AKS, etc., and handles concurrent requests.
28 | with app.app_context():
29 | ai4e_service = APIService(app, log)
30 |
31 | # Load the model
32 | # The model was copied to this location when the container was built; see ../Dockerfile
33 | model_path = '/app/tf_iNat_api/frozen_inference_graph.pb'
34 | detection_graph = tf_detector.load_model(model_path)
35 |
36 | # Define a function for processing request data, if applicable. This function loads data or files into
37 | # a dictionary for access in your API function. We pass this function as a parameter to your API setup.
38 | def process_request_data(request):
39 | return_values = {'image_bytes': None}
40 | try:
41 | # Attempt to load the body
42 | return_values['image_bytes'] = BytesIO(request.data)
43 | except:
44 | log.log_error('Unable to load the request data') # Log to Application Insights
45 | return return_values
46 |
47 | # POST, async API endpoint example
48 | @ai4e_service.api_async_func(
49 | api_path = '/detect',
50 | methods = ['POST'],
51 | request_processing_function = process_request_data, # This is the data process function that you created above.
52 |     maximum_concurrent_requests = 5, # If the number of requests exceeds this limit, a 503 is returned to the caller.
53 | content_types = ACCEPTED_CONTENT_TYPES,
54 | content_max_length = 10000, # In bytes
55 | trace_name = 'post:detect')
56 | def detect(*args, **kwargs):
57 | print('runserver.py: detect() called, generating detections...')
58 | image_bytes = kwargs.get('image_bytes')
59 | taskId = kwargs.get('taskId')
60 |
61 | # Update the task status, so the caller knows it has been accepted and is running.
62 | ai4e_service.api_task_manager.UpdateTaskStatus(taskId, 'running - generate_detections')
63 |
64 | try:
65 | image = tf_detector.open_image(image_bytes)
66 | boxes, scores, clsses, image = tf_detector.generate_detections(
67 | detection_graph, image)
68 |
69 | ai4e_service.api_task_manager.UpdateTaskStatus(taskId, 'rendering boxes')
70 |
71 | # image is modified in place
72 |         # here confidence_threshold is hardcoded, but you can ask for it as an input in the request
73 | tf_detector.render_bounding_boxes(
74 | boxes, scores, clsses, image, confidence_threshold=0.5)
75 |
76 | print('runserver.py: detect(), rendering and saving result image...')
77 |         # save the PIL Image object to a BytesIO stream so that it can be written to blob storage
78 | output_img_stream = BytesIO()
79 | image.save(output_img_stream, format='jpeg')
80 | output_img_stream.seek(0)
81 |
82 | sas_blob_helper = SasBlob()
83 | # Create a unique name for a blob container
84 | container_name = str(uuid.uuid4()).replace('-','')
85 |
86 | # Create a writable sas container and return its url
87 | sas_url = sas_blob_helper.create_writable_container_sas(
88 | getenv('STORAGE_ACCOUNT_NAME'), getenv('STORAGE_ACCOUNT_KEY'), container_name, blob_access_duration_hrs)
89 |
90 | # Write the image to the blob
91 | sas_blob_helper.write_blob(sas_url, 'detect_output.jpg', output_img_stream)
92 |
93 | ai4e_service.api_task_manager.CompleteTask(taskId, 'completed - output written to: ' + sas_url)
94 | print('runserver.py: detect() finished.')
95 | except:
96 | log.log_exception(sys.exc_info()[0], taskId)
97 | ai4e_service.api_task_manager.FailTask(taskId, 'failed: ' + str(sys.exc_info()[0]))
98 |
99 | if __name__ == '__main__':
100 | app.run()
101 |
--------------------------------------------------------------------------------
/Examples/tensorflow/tf_iNat_api/tf_detector.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import PIL.Image as Image
4 | import PIL.ImageColor as ImageColor
5 | import PIL.ImageDraw as ImageDraw
6 | import PIL.ImageFont as ImageFont
7 |
8 |
9 | # Core detection functions
10 |
11 |
12 | def load_model(checkpoint):
13 | """Load a detection model (i.e., create a graph) from a .pb file.
14 |
15 | Args:
16 | checkpoint: .pb file of the model.
17 |
18 | Returns: the loaded graph.
19 |
20 | """
21 | print('tf_detector.py: Loading graph...')
22 | detection_graph = tf.Graph()
23 | with detection_graph.as_default():
24 | od_graph_def = tf.GraphDef()
25 | with tf.gfile.GFile(checkpoint, 'rb') as fid:
26 | serialized_graph = fid.read()
27 | od_graph_def.ParseFromString(serialized_graph)
28 | tf.import_graph_def(od_graph_def, name='')
29 | print('tf_detector.py: Detection graph loaded.')
30 |
31 | return detection_graph
32 |
33 |
34 | def open_image(image_bytes):
35 | """ Open an image in binary format using PIL.Image and convert to RGB mode
36 | Args:
37 | image_bytes: an image in binary format read from the POST request's body
38 |
39 | Returns:
40 |         a PIL Image object in RGB mode
41 | """
42 | image = Image.open(image_bytes)
43 | if image.mode not in ('RGBA', 'RGB'):
44 | raise AttributeError('Input image not in RGBA or RGB mode and cannot be processed.')
45 | if image.mode == 'RGBA':
46 | # Image.convert() returns a converted copy of this image
47 | image = image.convert(mode='RGB')
48 | return image
49 |
50 |
51 | def generate_detections(detection_graph, image):
52 | """ Generates a set of bounding boxes with confidence and class prediction for one input image file.
53 |
54 | Args:
55 | detection_graph: an already loaded object detection inference graph.
56 |         image: a PIL Image object
57 |
58 | Returns:
59 |         boxes, scores, classes, and the input image - for one image
60 | """
61 | image_np = np.asarray(image, np.uint8)
62 | image_np = image_np[:, :, :3] # Remove the alpha channel
63 |
64 | #with detection_graph.as_default():
65 | with tf.Session(graph=detection_graph) as sess:
66 | image_np = np.expand_dims(image_np, axis=0)
67 |
68 | # get the operators
69 | image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
70 | box = detection_graph.get_tensor_by_name('detection_boxes:0')
71 | score = detection_graph.get_tensor_by_name('detection_scores:0')
72 | clss = detection_graph.get_tensor_by_name('detection_classes:0')
73 | num_detections = detection_graph.get_tensor_by_name('num_detections:0')
74 |
75 | # performs inference
76 | (box, score, clss, num_detections) = sess.run(
77 | [box, score, clss, num_detections],
78 | feed_dict={image_tensor: image_np})
79 |
80 | return np.squeeze(box), np.squeeze(score), np.squeeze(clss), image # these are lists of bboxes, scores etc
81 |
82 |
83 | # Rendering functions
84 |
85 |
86 | def render_bounding_boxes(boxes, scores, classes, image, label_map={}, confidence_threshold=0.5):
87 | """Renders bounding boxes, label and confidence on an image if confidence is above the threshold.
88 |
89 | Args:
90 | boxes, scores, classes: outputs of generate_detections.
91 | image: PIL.Image object, output of generate_detections.
92 | label_map: optional, mapping the numerical label to a string name.
93 | confidence_threshold: threshold above which the bounding box is rendered.
94 |
95 | image is modified in place!
96 |
97 | """
98 | display_boxes = []
99 | display_strs = [] # list of list, one list of strings for each bounding box (to accommodate multiple labels)
100 |
101 | for box, score, clss in zip(boxes, scores, classes):
102 | if score > confidence_threshold:
103 | print('Confidence of detection greater than threshold: ', score)
104 | display_boxes.append(box)
105 | clss = int(clss)
106 | label = label_map[clss] if clss in label_map else str(clss)
107 | displayed_label = '{}: {}%'.format(label, round(100*score))
108 | display_strs.append([displayed_label])
109 |
110 | display_boxes = np.array(display_boxes)
111 | if display_boxes.shape == (0,): # no detections are above threshold
112 | return
113 | else:
114 | draw_bounding_boxes_on_image(image, display_boxes, display_str_list_list=display_strs)
115 |
116 | # the following two functions are from https://github.com/tensorflow/models/blob/master/research/object_detection/utils/visualization_utils.py
117 |
118 | def draw_bounding_boxes_on_image(image,
119 | boxes,
120 | color='LimeGreen',
121 | thickness=4,
122 | display_str_list_list=()):
123 | """Draws bounding boxes on image.
124 |
125 | Args:
126 | image: a PIL.Image object.
127 | boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
128 | The coordinates are in normalized format between [0, 1].
129 |     color: color to draw bounding box. Default is LimeGreen.
130 | thickness: line thickness. Default value is 4.
131 | display_str_list_list: list of list of strings.
132 | a list of strings for each bounding box.
133 | The reason to pass a list of strings for a
134 | bounding box is that it might contain
135 | multiple labels.
136 |
137 | Raises:
138 | ValueError: if boxes is not a [N, 4] array
139 | """
140 | boxes_shape = boxes.shape
141 | if not boxes_shape:
142 | return
143 | if len(boxes_shape) != 2 or boxes_shape[1] != 4:
144 | raise ValueError('Input must be of size [N, 4]')
145 | for i in range(boxes_shape[0]):
146 | display_str_list = ()
147 | if display_str_list_list:
148 | display_str_list = display_str_list_list[i]
149 | draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
150 | boxes[i, 3], color, thickness, display_str_list)
151 |
152 |
153 | def draw_bounding_box_on_image(image,
154 | ymin,
155 | xmin,
156 | ymax,
157 | xmax,
158 | color='red',
159 | thickness=4,
160 | display_str_list=(),
161 | use_normalized_coordinates=True):
162 | """Adds a bounding box to an image.
163 |
164 | Bounding box coordinates can be specified in either absolute (pixel) or
165 | normalized coordinates by setting the use_normalized_coordinates argument.
166 |
167 | Each string in display_str_list is displayed on a separate line above the
168 | bounding box in black text on a rectangle filled with the input 'color'.
169 | If the top of the bounding box extends to the edge of the image, the strings
170 | are displayed below the bounding box.
171 |
172 | Args:
173 | image: a PIL.Image object.
174 | ymin: ymin of bounding box.
175 | xmin: xmin of bounding box.
176 | ymax: ymax of bounding box.
177 | xmax: xmax of bounding box.
178 | color: color to draw bounding box. Default is red.
179 | thickness: line thickness. Default value is 4.
180 | display_str_list: list of strings to display in box
181 | (each to be shown on its own line).
182 | use_normalized_coordinates: If True (default), treat coordinates
183 | ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
184 | coordinates as absolute.
185 | """
186 | draw = ImageDraw.Draw(image)
187 | im_width, im_height = image.size
188 | if use_normalized_coordinates:
189 | (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
190 | ymin * im_height, ymax * im_height)
191 | else:
192 | (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
193 | draw.line([(left, top), (left, bottom), (right, bottom),
194 | (right, top), (left, top)], width=thickness, fill=color)
195 | try:
196 | font = ImageFont.truetype('arial.ttf', 24)
197 | except IOError:
198 | font = ImageFont.load_default()
199 |
200 | # If the total height of the display strings added to the top of the bounding
201 | # box exceeds the top of the image, stack the strings below the bounding box
202 | # instead of above.
203 | display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
204 | # Each display_str has a top and bottom margin of 0.05x.
205 | total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
206 |
207 | if top > total_display_str_height:
208 | text_bottom = top
209 | else:
210 | text_bottom = bottom + total_display_str_height
211 | # Reverse list and print from bottom to top.
212 | for display_str in display_str_list[::-1]:
213 | text_width, text_height = font.getsize(display_str)
214 | margin = np.ceil(0.05 * text_height)
215 | draw.rectangle(
216 | [(left, text_bottom - text_height - 2 * margin), (left + text_width,
217 | text_bottom)],
218 | fill=color)
219 | draw.text(
220 | (left + margin, text_bottom - text_height - margin),
221 | display_str,
222 | fill='black',
223 | font=font)
224 | text_bottom -= text_height - 2 * margin
225 |
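226 | # Example usage (an illustrative sketch, not part of the API): this assumes a
227 | # detection graph has already been loaded elsewhere in this module and that a
228 | # local image file named 'sample.jpg' exists.
229 | #
230 | #   from PIL import Image
231 | #   image = Image.open('sample.jpg')
232 | #   boxes, scores, classes, image = generate_detections(detection_graph, image)
233 | #   render_bounding_boxes(boxes, scores, classes, image, confidence_threshold=0.5)
234 | #   image.save('sample_annotated.jpg')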
--------------------------------------------------------------------------------
/JupyterNotebook.md:
--------------------------------------------------------------------------------
1 | # AI for Earth - Creating a Jupyter Notebook Demo
2 | The AI for Earth team is compiling a small suite of Jupyter Notebook demos. These notebooks allow us to showcase the work of the AI for Earth grant recipients, and demonstrate the use of their machine learning models to benefit agriculture, water, climate, and biodiversity.
3 |
4 | This article will walk you through the creation of a Jupyter notebook. You are also welcome to start with this [template notebook](./Notebooks/template-demo.ipynb).
5 |
6 | If you are already comfortable with Jupyter notebooks, you may create them however you'd like. If you are new to them, you are welcome to try https://notebooks.azure.com/ to create a Jupyter notebook.
7 |
8 | You will also need to provide sample data to use in this demonstration. Please ensure that it is data that can be shared publicly.
9 |
10 | ## Outline
11 |
12 | Here are our suggestions for the outline of your notebook.
13 | + The first cell should be markdown, giving a title for your demo and a short description of what it does.
14 | + The next two cells should be a markdown and corresponding code cell for imports and constants.
15 | + The next two cells should be a markdown and corresponding code cell for helper functions. Functions that retrieve data, display an image, plot data, etc. can live here.
16 | + The next two cells should be a markdown and corresponding code cell to display your input, if needed. For a computer vision problem like image classification, object detection, or segmentation, it is nice to see the input image before the results.
17 | + Finally, the remaining cells can be used to call your machine learning model and show its results. If you are not hosting it yourself, please ensure that you have provided a Docker container to us.
18 |
19 | ## Guidelines
20 |
21 | Please choose a descriptive name for your notebook (and any config files, if needed). The format organization-apiName-demo.ipynb works well, where organization is your company or university name and apiName describes the purpose of your machine learning model.
22 |
23 | We should get as close as we can to only three cells before the action happens: (1) one cell for constants, imports, and a health check, (2) one cell for function definitions, and (3) one cell to retrieve and display sample input. Each should have a ### heading describing what it does, and in general those headings should be similar or identical across notebooks. Merging into a small number of cells streamlines the demo workflow and avoids a lot of clicking.
24 |
25 | If there are details that you don't want to be visible to the audience during a demo (such as connection string information to an Azure blob storage account), you are welcome to provide a configuration file as well.
26 |
27 | When we open the notebook to demo, the output should be cleared (it steals the demo's thunder if the audience sees the API result before we call the API). To keep autosave from saving cell output, add %autosave 0 in the "Imports and Constants" cell, as in the sketch below.
28 |
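29 | A minimal "Imports and Constants" cell might look like this (the endpoint URL and sample file name are placeholders, not real values):
30 | 
31 | ```python
32 | %autosave 0
33 | 
34 | import json
35 | import requests
36 | 
37 | # Placeholder values - replace with your own API endpoint and sample data.
38 | API_URL = 'http://example-api.eastus.azurecontainer.io/v1/my_api/tasker/'
39 | SAMPLE_IMAGE = 'demo_image.jpg'
40 | ```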
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Microsoft Corporation. All rights reserved.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE
22 |
--------------------------------------------------------------------------------
/Notebooks/demo_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AIforEarth-API-Development/4a720791330c82c4004c2ebffd059ff90cdb2f98/Notebooks/demo_image.jpg
--------------------------------------------------------------------------------
/Notebooks/hackathon.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Model to API\n",
8 | "\n",
9 | "\n",
10 | "https://github.com/Microsoft/AIforEarth-API-Development\n",
11 | "\n",
12 | "\n",
13 | "## Prerequisites\n",
14 | "\n",
15 | "\n",
16 | "### Docker (if running locally)\n",
17 | "\n",
18 | "- Windows: https://download.docker.com/win/stable/Docker%20for%20Windows%20Installer.exe\n",
19 | "\n",
20 | "- OSX: https://download.docker.com/mac/stable/Docker.dmg\n",
21 | "\n",
22 | "\n",
23 | "### Azure CLI (if running locally)\n",
24 | "\n",
25 | "https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest\n",
26 | "\n",
27 | "\n",
28 | "### Postman\n",
29 | "\n",
30 | "https://www.getpostman.com/\n",
31 | "\n",
32 | "\n",
33 | "### Git (optional)\n",
34 | "\n",
35 | "https://git-scm.com/downloads\n"
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {},
41 | "source": [
42 | "# Containerize and Deploy\n",
43 | "## Set up development environment\n",
44 | "\n",
45 | "\n",
46 | "1. Visit https://shell.azure.com and sign in.\n",
47 | "\n",
48 | "2. Change the directory to clouddrive - this is the location of your shell environment.\n",
49 | "\n",
50 | "```bash\n",
51 | "cd clouddrive```\n",
52 | "\n",
53 | "3. Clone the AI for Earth API Framework to your drive\n",
54 | "\n",
55 | "```bash\n",
56 | "git clone https://github.com/Microsoft/AIforEarth-API-Development.git\n",
57 | "```\n",
58 | "\n",
59 | "4. Create your API.\n",
60 | "\n",
61 | "5. Log into Azure via the Azure CLI and select your subscription.\n",
62 | "\n",
63 | "```bash\n",
64 | "az login\n",
65 | "az account set --subscription \n",
66 | "```\n",
67 | "\n",
68 | "```bash\n",
69 | "cd clouddrive\n",
70 | "git clone https://github.com/Microsoft/AIforEarth-API-Development.git\n",
71 | "\n",
72 | "```"
73 | ]
74 | },
75 | {
76 | "cell_type": "markdown",
77 | "metadata": {},
78 | "source": [
79 | "## Handle images and create an endpoint\n",
80 | "1. Set an Azure Resource Group name\n",
81 | "```bash\n",
82 | "RES_GROUP=$INITIALS-rg```\n",
83 | "\n",
84 | "2. Set an Azure Container Registry name\n",
85 | "```bash\n",
86 | "ACR_NAME=$RES_GROUPregistry\n",
87 | "az group create --name $RES_GROUP --location eastus```\n",
88 | "\n",
89 | "3. Create your Azure Container Registry\n",
90 | "\n",
91 | "```bash\n",
92 | "az acr create --resource-group $RES_GROUP --name $ACR_NAME --sku Standard --location eastus --admin-enabled true```\n",
93 | "\n",
94 | "4. Build your container image\n",
95 | "\n",
96 | "```bash\n",
97 | "az acr build --registry $ACR_NAME --image $ACR_NAME.azurecr.io/IMAGE_NAME:1 .```\n",
98 | "\n",
99 | "5. Get credentials for your Azure Container Registry\n",
100 | "\n",
101 | "```bash\n",
102 | "az acr credential show --name $ACR_NAME --resource-group $RES_GROUP --subscription SUB_ID```\n",
103 | "\n",
104 | "6. Create an instance of Azure Container Instances with your container\n",
105 | "\n",
106 | "```bash\n",
107 | "az container create --resource-group $RES_GROUP --name $NAME_YOUR_INSTANCE --image $ACR_NAME.azurecr.io/IMAGE_NAME:1 --registry-login-server $ACR_NAME.azurecr.io --registry-username --registry-password --dns-name-label tstorm-$ACR_NAME --query \"{FQDN:ipAddress.fqdn}\" --output table```"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": 1,
113 | "metadata": {},
114 | "outputs": [
115 | {
116 | "name": "stdout",
117 | "output_type": "stream",
118 | "text": [
119 | "Collecting aiohttp\n",
120 | " Downloading https://files.pythonhosted.org/packages/92/63/85e28605cd8f08a062974db3338c7e77437b662d980ef0dc6705fde167c6/aiohttp-3.5.4-cp35-cp35m-macosx_10_13_x86_64.whl (616kB)\n",
121 | "\u001b[K 100% |████████████████████████████████| 624kB 1.2MB/s ta 0:00:011\n",
122 | "\u001b[?25hCollecting async-timeout<4.0,>=3.0 (from aiohttp)\n",
123 | " Downloading https://files.pythonhosted.org/packages/e1/1e/5a4441be21b0726c4464f3f23c8b19628372f606755a9d2e46c187e65ec4/async_timeout-3.0.1-py3-none-any.whl\n",
124 | "Collecting yarl<2.0,>=1.0 (from aiohttp)\n",
125 | " Downloading https://files.pythonhosted.org/packages/fb/84/6d82f6be218c50b547aa29d0315e430cf8a23c52064c92d0a8377d7b7357/yarl-1.3.0.tar.gz (159kB)\n",
126 | "\u001b[K 100% |████████████████████████████████| 163kB 5.5MB/s eta 0:00:01\n",
127 | "\u001b[?25hCollecting idna-ssl>=1.0; python_version < \"3.7\" (from aiohttp)\n",
128 | " Downloading https://files.pythonhosted.org/packages/46/03/07c4894aae38b0de52b52586b24bf189bb83e4ddabfe2e2c8f2419eec6f4/idna-ssl-1.1.0.tar.gz\n",
129 | "Requirement already satisfied: chardet<4.0,>=2.0 in /Users/pflickin/anaconda/lib/python3.5/site-packages (from aiohttp)\n",
130 | "Requirement already satisfied: attrs>=17.3.0 in /Users/pflickin/anaconda/lib/python3.5/site-packages (from aiohttp)\n",
131 | "Collecting multidict<5.0,>=4.0 (from aiohttp)\n",
132 | " Downloading https://files.pythonhosted.org/packages/bb/28/1fc220d278c6c3aa276685cd6e4116bda7b4f95aa6c62e343986b284281b/multidict-4.5.2-cp35-cp35m-macosx_10_12_intel.macosx_10_12_x86_64.macosx_10_13_intel.macosx_10_13_x86_64.whl (195kB)\n",
133 | "\u001b[K 100% |████████████████████████████████| 204kB 4.9MB/s eta 0:00:01\n",
134 | "\u001b[?25hCollecting typing-extensions>=3.6.5; python_version < \"3.7\" (from aiohttp)\n",
135 | " Downloading https://files.pythonhosted.org/packages/0f/62/c66e553258c37c33f9939abb2dd8d2481803d860ff68e635466f12aa7efa/typing_extensions-3.7.2-py3-none-any.whl\n",
136 | "Requirement already satisfied: idna>=2.0 in /Users/pflickin/anaconda/lib/python3.5/site-packages (from yarl<2.0,>=1.0->aiohttp)\n",
137 | "Building wheels for collected packages: yarl, idna-ssl\n",
138 | " Running setup.py bdist_wheel for yarl ... \u001b[?25ldone\n",
139 | "\u001b[?25h Stored in directory: /Users/pflickin/Library/Caches/pip/wheels/e3/f0/13/d7c1c5cd76ef321fb635ce79232ca973cd0c91fabaaa71e1c7\n",
140 | " Running setup.py bdist_wheel for idna-ssl ... \u001b[?25ldone\n",
141 | "\u001b[?25h Stored in directory: /Users/pflickin/Library/Caches/pip/wheels/d3/00/b3/32d613e19e08a739751dd6bf998cfed277728f8b2127ad4eb7\n",
142 | "Successfully built yarl idna-ssl\n",
143 | "Installing collected packages: async-timeout, multidict, yarl, idna-ssl, typing-extensions, aiohttp\n",
144 | "Successfully installed aiohttp-3.5.4 async-timeout-3.0.1 idna-ssl-1.1.0 multidict-4.5.2 typing-extensions-3.7.2 yarl-1.3.0\n",
145 | "\u001b[33mYou are using pip version 9.0.1, however version 19.1.1 is available.\n",
146 | "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n"
147 | ]
148 | }
149 | ],
150 | "source": [
151 | "!pip install aiohttp"
152 | ]
153 | },
154 | {
155 | "cell_type": "markdown",
156 | "metadata": {},
157 | "source": [
158 | "## Sync endpoint - Azure Container Instances"
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": null,
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "import requests\n",
168 | "my_url = 'http://YOUR_REGISTRY.eastus.azurecontainer.io/YOUR_URL'\n",
169 | "\n",
170 | "# Example on setting file payloads\n",
171 | "f1 = open(\"track_step_NCARSTORM_d01_20170323-0000.csv\", \"rb\")\n",
172 | "f2 = open(\"track_step_NCARSTORM_d01_20170329-0000.csv\", \"rb\")\n",
173 | "payload = {'1.csv': f1, '2.csv': f2}\n",
174 | "\n",
175 | "r = requests.post(my_url, files=payload)\n",
176 | "forecast_values = r.text\n",
177 | "print(forecast_values)"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "## Async endpoint - Azure Container Instances\n",
185 | "A task id will be returned. The following cell will gather the status of your run with your task id."
186 | ]
187 | },
188 | {
189 | "cell_type": "code",
190 | "execution_count": null,
191 | "metadata": {},
192 | "outputs": [],
193 | "source": [
194 | "import aiohttp\n",
195 | "my_url = 'http://YOUR_REGISTRY.eastus.azurecontainer.io/YOUR_URL'\n",
196 | "\n",
197 | "async with aiohttp.ClientSession() as session:\n",
198 | " async with session.post(my_url, json ={\n",
199 | " \"track_step_file_names\": [\n",
200 | " \"track_step_NCARSTORM_d01_20170323-0000.csv\", \n",
201 | " \"track_step_NCARSTORM_d01_20170329-0000.csv\"],\n",
202 | " \"sas_container\": \"https://sourcedata.blob.core.windows.net/async?st=2019-04-25T12%3A20%3A04Z&se=2019-04-26T12%3A20%3A04Z&sp=rwl&sv=2018-03-28&sr=c&sig=bo%2FZAn6rbvvHU54IruJwXPVh6emYam4XT%2FtA0YkgaZM%3D\"\n",
203 | "}) as response:\n",
204 | " data = await response.text()\n",
205 | " print (data)"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": null,
211 | "metadata": {},
212 | "outputs": [],
213 | "source": [
214 | "task_id = 3501\n",
215 | "import aiohttp\n",
216 | "my_url = 'http://tstorm-pefregistry.eastus.azurecontainer.io/v1/thunderstorm/task/' + str(task_id)\n",
217 | "\n",
218 | "async with aiohttp.ClientSession() as session:\n",
219 | " async with session.get(my_url) as response:\n",
220 | " data = await response.text()\n",
221 | " print (data)"
222 | ]
223 | }
224 | ],
225 | "metadata": {
226 | "anaconda-cloud": {},
227 | "kernelspec": {
228 | "display_name": "Python [Root]",
229 | "language": "python",
230 | "name": "Python [Root]"
231 | },
232 | "language_info": {
233 | "codemirror_mode": {
234 | "name": "ipython",
235 | "version": 3
236 | },
237 | "file_extension": ".py",
238 | "mimetype": "text/x-python",
239 | "name": "python",
240 | "nbconvert_exporter": "python",
241 | "pygments_lexer": "ipython3",
242 | "version": "3.5.4"
243 | }
244 | },
245 | "nbformat": 4,
246 | "nbformat_minor": 2
247 | }
248 |
--------------------------------------------------------------------------------
/Notebooks/lab_manual.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Lab: Deploy your ML/DL model as a scalable API in 60 minutes\n",
8 | "\n",
9 | "Repo: https://github.com/microsoft/AIforEarth-API-Development\n",
10 | "\n",
11 | "This document is meant to be a lab walk-through, so it contains all the instructions for you to complete the steps easily, but has minimal explanation on _how_ the technologies involved work or _why_ we use them.\n",
12 | "\n",
13 | "For more motivation for deploying machine learning models as APIs as a way to make your work available to the community and detailed explanation of each step, see our [Quickstart](https://github.com/microsoft/AIforEarth-API-Development/blob/master/Quickstart.md#ai-for-earth---quickstart-tutorial) or the main [Readme](https://github.com/Microsoft/AIforEarth-API-Development/#ai-for-earth---creating-apis) on this repo."
14 | ]
15 | },
16 | {
17 | "cell_type": "markdown",
18 | "metadata": {},
19 | "source": [
20 | "## 1. Prerequisites\n",
21 | "\n",
22 | "There are two options for working through the lab:\n",
23 | "\n",
24 | "1. Using your laptop. Make sure to have Docker installed and working. Instructions for all operating systems can be found here: https://docs.docker.com/install/\n",
25 | " - Even if you’re running Windows, we use Linux containers for model deployment. When the Docker Desktop installer asks you:\n",
26 | " - If you want to use Windows containers, say “no” (i.e., don’t check the box that says “use Windows containers instead of Linux containers”)\n",
27 | " - If you want to reboot to enable Hyper-V so you can run Linux containers, say “yes”\n",
28 | "\n",
29 | "\n",
30 | "2. Using an Azure Data Science Virtual Machine for Linux (Ubuntu) (important to get the *Linux* version, not Windows), where Docker is already installed. Make sure you have access to an Azure Subscription and have the VM created there, and that you have good tools for editing code on the Linux VM (comfortable with emacs/vim, or use VS Code plug-ins, Sublime SFTP package, PyCharm Professional with deployment support, etc).\n",
31 | "\n",
32 | "If you would like to deploy a model of your own during the lab instead of using a sample model, make sure to have the model file at hand and a Python or R script for loading the model and performing inference on incoming data. \n",
33 | "\n",
34 | "If you would like to go through the \"deploy remotely\" section of this tutorial, it's best to have Azure CLI [installed](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) in your environment so you can log into an Azure Container Registry. This assumes that you have access to an Azure subscription."
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 | "## 2. Motivation\n",
42 | "\n",
43 | "The idea is that you can use our Framework to develop and test an API locally, and the resulting container can be “dropped in” and deployed on our scalable, Kubernetes-based [platform](https://github.com/microsoft/AIforEarth-API-Platform/) with no additional changes (local libraries are swapped with distributed/scalable versions)."
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "## 3. Preparation\n",
51 | "\n",
52 | "### 3.1 Make sure Docker is running\n",
53 | "\n",
54 | "In your environment (local or a Linux DSVM), make sure Docker is running:\n",
55 | "\n",
56 | "```\n",
57 | "docker ps\n",
58 | "```\n",
59 | "This lists all the Docker processes, which is probably empty right now.\n",
60 | "\n",
61 | "If you're running into permission issues, use `sudo` with all the Docker commands. If you don’t want to preface the `docker` command with `sudo`, go through the post-installation processes detailed [here](https://docs.docker.com/install/linux/linux-postinstall/) and restart the VM.\n",
62 | "\n",
63 | "\n",
64 | "### 3.2 Pull our example `base-py` Docker image\n",
65 | "\n",
66 | "To save you time later on, please issue the following command to start downloading the Python-based base image:\n",
67 | "\n",
68 | "```\n",
69 | "docker pull mcr.microsoft.com/aiforearth/base-py:latest\n",
70 | "```\n",
71 | "You should see a sequence of layers get downloaded.\n",
72 | "\n",
73 | "\n",
74 | "### 3.3 Clone this repo\n",
75 | "\n",
76 | "Clone this repository:\n",
77 | "\n",
78 | "```\n",
79 | "git clone https://github.com/microsoft/AIforEarth-API-Development.git\n",
80 | "```\n",
81 | "\n",
82 | "In this walk-through, we will deploy a toy API using our `base-py` image, which is the most barebone Python-based Docker image in our API Framework. \n",
83 | "\n",
84 | "- Navigate to the `base-py` example in `AIforEarth-API-Development/Examples/base-py`.\n",
85 | "\n",
86 | "\n",
87 | "### 3.4 Examine components of the API files\n",
88 | "\n",
89 | "The `Examples/base-py` directory contains all the pieces of the API. To read more about what the `Dockerfile` and other files do, visit the [main readme](https://github.com/microsoft/AIforEarth-API-Development#step-2-build-an-example-service) of this repo. The most important files are:\n",
90 | "\n",
91 | "- `Dockerfile`: describes the commands that needs to be executed on top of a base image, such as installing additional packages using `conda`, copying the API execution code (`./my_api`) to the container, setting up environment variables, and expose a port on the container (1212).\n",
92 | " - For the list of instructions that can be used in a Dockerfile, see https://docs.docker.com/engine/reference/builder (Docker documentation is excellent!). \n",
93 | "\n",
94 | "- `my_api/runserver.py`: the `my_api` folder should contain all the execution code files and model files (inference graphs, pickled model weights, etc). \n",
95 | " - The script `runserver.py` is the entry point script, where the Flask app is set up and your endpoints defined. \n",
96 | " \n",
97 | " \n",
98 | "#### Endpoints\n",
99 | "\n",
100 | "Notice that currently in `my_api/runserver.py`, there are two endpoints defined, marked by the `@ai4e_service.api_async_func` and the `@ai4e_service.api_sync_func` decorators. \n",
101 | "\n",
102 | "For a more detailed explanation of the input/output patterns, see this [section](https://github.com/microsoft/AIforEarth-API-Development/blob/master/Quickstart.md#inputoutput-patterns) in our Quickstart.\n",
103 | "\n",
104 | "##### Async endpoint\n",
105 | "\n",
106 | "The async/long-running endpoint `default_post` has an input validation function called `process_request_data`, defined above and referenced in the `request_processing_function` field of the decorator. It currently just reads the data passed by the user in the POST body of the call, and make it available to the `default_post` function in a keyword argument called `data`. \n",
107 | "\n",
108 | "This function then obtains a `taskId` in another keyword argument - this is placed here by the API Framework. You should pass this `taskId` back to the user before entering into a long-running process so they can use it at the `/task` endpoint to check the status of their request.\n",
109 | "\n",
110 | "It then loads the serialized json body, and calls a function `run_model`, where your model inference code should take the input arguments and data, apply the model, and uploads the results somewhere or write it in the status body if the result is short (such as a classification).\n",
111 | "\n",
112 | "##### Sync endpoint\n",
113 | "\n",
114 | "The example synchronous endpoint is `echo`, which echos the argument the user passed in through the URL back to the user."
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "metadata": {},
120 | "source": [
121 | "## 4. Deploy locally\n",
122 | "\n",
123 | "### 4.1 Build the image\n",
124 | "To build a Docker image from the Dockerfile in the current directory:\n",
125 | "```\n",
126 | "docker build . -t yasiyu.azurecr.io/my-api/1.0-example-api:1\n",
127 | "```\n",
128 | "\n",
129 | "The first time you build it, it will be fairly slow (even if you've previously pulled the base image) as it needs to install all the conda dependencies. Docker will cache layers that are not modified, so subsequent builds should be much faster as it is only copying the code that changed in the `my_api` folder.\n",
130 | "\n",
131 | "Here `.` refers to the fact that we are using the current directory as the Docker build context. The `-t` argument is the tag that you can give to this image; here's a convention for naming Docker images, which is:\n",
132 | "\n",
133 | "```\n",
134 | ".azurecr.io//-:\n",
135 | "```\n",
136 | "\n",
137 | "### 4.2 Start the container\n",
138 | "\n",
139 | "To start a container using this image you just built:\n",
140 | "```\n",
141 | "docker run -p 6002:1212 yasiyu.azurecr.io/my-api/1.0-example-api:1\n",
142 | "```\n",
143 | "The port mapping specified using `-p` maps localhost:6002 to port 1212 in the Docker container, which you exposed in the Dockerfile. \n",
144 | "\n",
145 | "If you're on Windows and run into an error `standard_init_linux.go:207: exec user process caused \"no such file or directory\"`, see this [section](https://github.com/microsoft/AIforEarth-API-Development/blob/master/Quickstart.md#run-your-image-locally) in our Quickstart for how to fix it.\n",
146 | "\n",
147 | "\n",
148 | "### 4.3 Test the synchronous endpoint\n",
149 | "You can now make an API call to\n",
150 | "\n",
151 | "```\n",
152 | "http://localhost:6002/v1/my_api/tasker/echo/hello_world\n",
153 | "```\n",
154 | "to hit the sync `echo` endpoint defined in `my_api/runserver.py`. You can paste this URL string into a browser since it's a GET request; otherwise, use the following snippet to test it programmatically:"
155 | ]
156 | },
157 | {
158 | "cell_type": "code",
159 | "execution_count": 1,
160 | "metadata": {},
161 | "outputs": [
162 | {
163 | "name": "stdout",
164 | "output_type": "stream",
165 | "text": [
166 | "Echo: hello_world\n"
167 | ]
168 | }
169 | ],
170 | "source": [
171 | "import requests\n",
172 | "\n",
173 | "base_url = 'http://localhost:6002/v1/my_api/tasker/'\n",
174 | "sync_endpoint = 'echo/'\n",
175 | "argument = 'hello_world'\n",
176 | "\n",
177 | "url = base_url + sync_endpoint + argument\n",
178 | "\n",
179 | "r = requests.get(url)\n",
180 | "print(r.text)"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "metadata": {},
186 | "source": [
187 | "### Test the asynchronous endpoint"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": 2,
193 | "metadata": {},
194 | "outputs": [
195 | {
196 | "name": "stdout",
197 | "output_type": "stream",
198 | "text": [
199 | "TaskId: 9365\n"
200 | ]
201 | }
202 | ],
203 | "source": [
204 | "import json\n",
205 | "\n",
206 | "async_endpoint = 'example'\n",
207 | "\n",
208 | "url = base_url + async_endpoint\n",
209 | "\n",
210 | "payload = {'key': 'value'}\n",
211 | "payload = json.dumps(payload) # serialize the json payload\n",
212 | "\n",
213 | "r = requests.post(url, data=payload)\n",
214 | "print(r.text)"
215 | ]
216 | },
217 | {
218 | "cell_type": "code",
219 | "execution_count": 3,
220 | "metadata": {},
221 | "outputs": [],
222 | "source": [
223 | "task_id = r.text.split('TaskId: ')[1]"
224 | ]
225 | },
226 | {
227 | "cell_type": "code",
228 | "execution_count": 4,
229 | "metadata": {},
230 | "outputs": [
231 | {
232 | "name": "stdout",
233 | "output_type": "stream",
234 | "text": [
235 | "{\"uuid\": 9365, \"status\": \"running model\", \"timestamp\": \"2019-06-07 04:44:46\", \"endpoint\": \"uri\"}\n",
236 | "\n"
237 | ]
238 | }
239 | ],
240 | "source": [
241 | "# check the status using the TaskID returned\n",
242 | "r = requests.get(base_url + 'task/' + task_id)\n",
243 | "print(r.text)\n",
244 | "\n",
245 | "# the example async API sleeps for 10 seconds. Check status again after 10 seconds and you should\n",
246 | "# see that the \"status\" is now \"completed\"."
247 | ]
248 | },
249 | {
250 | "cell_type": "markdown",
251 | "metadata": {},
252 | "source": [
253 | "### 4.4 Development process\n",
254 | "\n",
255 | "To kill the running Docker container, open another command line window and check its container ID (first column) or name (last column) using `docker ps`, then\n",
256 | "```\n",
257 | "docker kill container_ID_or_name\n",
258 | "```\n",
259 | "\n",
260 | "You may need to repeat this build-run-kill process as you make changes and debug through your API code.\n",
261 | "\n",
262 | "\n",
263 | "### 4.5 Using GPUs\n",
264 | "\n",
265 | "Substitute `nvidia-docker` in the place of `docker` in the `docker build` command if you're on a GPU-enabled VM with the necessary CUDA drivers so as to use the GPU for your inference:\n",
266 | "\n",
267 | "```\n",
268 | "nvidia-docker build . -t yasiyu.azurecr.io/my-api/1.0-example-api:1\n",
269 | "```\n"
270 | ]
271 | },
272 | {
273 | "cell_type": "markdown",
274 | "metadata": {},
275 | "source": [
276 | "## 5. Publish the Docker image\n",
277 | "\n",
278 | "When you are happy with how your API is working, you could push your Docker image containing all your API code and model files to Azure Container Registry (or DockerHub, if making it public). \n",
279 | "\n",
280 | "This allows you to pull (download) the same docker image on another VM and start a container based on that image and launch your API that way. Alternatively, you could still build the image on the VM you want to deploy to and start a container from that, if you can easily transfer the source code and model artifacts to the VM. To deploy using Azure Container Instances instead of a VM (so that you don't have to manage the VM), you would need your image pushed to an Azure Container Registry (ACR). \n",
281 | "\n",
282 | "Instructions for setting up your ACR are in [this section](https://github.com/microsoft/AIforEarth-API-Development/blob/master/Quickstart.md#publish-to-azure-container-registry) in our Quickstart.\n",
283 | "\n",
284 | "Assuming you've successfully set up an ACR called `yasiyu` with login server `yasiyu.azurecr.io`, you can log in to that on the command line with the Azure CLI:\n",
285 | "\n",
286 | "```\n",
287 | "az acr login --name yasiyu\n",
288 | "```\n",
289 | "\n",
290 | "assuming you have logged in to your Azure account via `az login` and that `az account show` indicates that the default subscription is the one containing this ACR instance.\n",
291 | "\n",
292 | "\n",
293 | "Then, push the image you built above:\n",
294 | "\n",
295 | "```\n",
296 | "docker push yasiyu.azurecr.io/my-api/1.0-example-api:1\n",
297 | "```\n"
298 | ]
299 | },
300 | {
301 | "cell_type": "markdown",
302 | "metadata": {},
303 | "source": [
304 | "## 6. Deploy on a VM\n",
305 | "\n",
306 | "One way to deploy this API for people in your team or a small group of users to call is to serve it from an Azure Linux VM.\n",
307 | "\n",
308 | "This involves starting a Docker container based on your Docker image in a [tmux session](https://hackernoon.com/a-gentle-introduction-to-tmux-8d784c404340) (or running in the background) on the VM. The tmux session allows your process to run after you've left the ssh session.\n",
309 | "\n",
310 | "If you have pushed your image to ACR, login to your ACR on the VM, and pull the image:\n",
311 | "\n",
312 | "```\n",
313 | "docker pull yasiyu.azurecr.io/my-api/1.0-example-api:1\n",
314 | "```\n",
315 | "\n",
316 | "(It seems that you need your ACR name in all lower case...)\n",
317 | "\n",
318 | "And start a container based on that image:\n",
319 | "\n",
320 | "```\n",
321 | "docker run -p 6000:1212 yasiyu.azurecr.io/my-api/1.0-example-api:1\n",
322 | "```\n",
323 | "\n",
324 | "Now visit Azure portal to open port 6000 on that VM. On the \"Networking\" section of the VM page, click on \"Add inbound port rule\", fill \"6000\" in \"Destination portl ranges\", and click \"Add\".\n",
325 | "\n",
326 | "Your API should now be available at \n",
327 | "```\n",
328 | "http://VM_DNS_name.vm_region.cloudapp.azure.com:6000\n",
329 | "```\n",
330 | "or\n",
331 | "```\n",
332 | "http://VM_IP_address:6000\n",
333 | "```"
334 | ]
335 | },
336 | {
337 | "cell_type": "markdown",
338 | "metadata": {},
339 | "source": [
340 | "## 7. Deploy via Azure Container Instances (ACI)\n",
341 | "\n",
342 | "See [section](https://github.com/microsoft/AIforEarth-API-Development/blob/master/Quickstart.md#run-your-container-in-azure-container-instances) \"Run your container in Azure Container Instances\" in our Quickstart for how to deploy an ACI container group based on the Docker image you pushed to your ACR in Azure Portal.\n",
343 | "\n",
344 | "Alternatively, you can deploy an ACI container group using the Azure CLI on the command line. First log in to your ACR:\n",
345 | "\n",
346 | "```\n",
347 | "az acr login --name yasiyu\n",
348 | "```\n",
349 | "Then create an ACI instance:\n",
350 | "\n",
351 | "```\n",
352 | "az container create --resource-group yasiyu_rg --name example-container1 --image yasiyu.azurecr.io/my-api/1.0-example-api:1 --dns-name-label yasiyu-api1 --ports 1212 --registry-username --registry-password \n",
353 | "```\n",
354 | "- You can look up the `registry-username` and `registry-password` fields in the Azure Portal page for your registry, in the \"Access keys\" section under \"Settings\".\n",
355 | "\n",
356 | "- Note that the `ports` argument should be `1212` since that is the port we specified to expose in the Dockerfile.\n",
357 | "\n",
358 | "- Documentation for the `az container create` command is [here](https://docs.microsoft.com/en-us/cli/azure/container?view=azure-cli-latest#az-container-create).\n",
359 | "\n",
360 | "- The container name must contain no more than 63 characters and must match the regex `[a-z0-9]([-a-z0-9]*[a-z0-9])?` (e.g. 'my-name')\n",
361 | "\n",
362 | "This will take a few minutes, and should print out the deployment status, with 'provisioningState' showing 'Succeeded'. Your IP address is also in this information, which you can also query using\n",
363 | "\n",
364 | "```\n",
365 | "az container show --resource-group yasiyu_rg --name example-container --query \"{FQDN:ipAddress.fqdn}\" --output table\n",
366 | "```\n",
367 | "\n",
368 | "Your API should now be available at this URL and the port you specified. "
369 | ]
370 | },
371 | {
372 | "cell_type": "code",
373 | "execution_count": 5,
374 | "metadata": {},
375 | "outputs": [
376 | {
377 | "name": "stdout",
378 | "output_type": "stream",
379 | "text": [
380 | "http://yasiyu-api1.eastus.azurecontainer.io:1212/v1/my_api/tasker/echo/hello_world\n",
381 | "Echo: hello_world\n"
382 | ]
383 | }
384 | ],
385 | "source": [
386 | "aci_address = 'yasiyu-api1.eastus.azurecontainer.io'\n",
387 | "aci_port = '1212'\n",
388 | "\n",
389 | "base_url = 'http://{}:{}/v1/my_api/tasker/'.format(aci_address, aci_port)\n",
390 | "\n",
391 | "sync_endpoint = 'echo/'\n",
392 | "argument = 'hello_world'\n",
393 | "\n",
394 | "url = base_url + sync_endpoint + argument\n",
395 | "print(url)\n",
396 | "\n",
397 | "r = requests.get(url)\n",
398 | "print(r.text)"
399 | ]
400 | },
401 | {
402 | "cell_type": "markdown",
403 | "metadata": {},
404 | "source": [
405 | "Congratulations, you now know how to deploy a Flask-based API! "
406 | ]
407 | },
408 | {
409 | "cell_type": "markdown",
410 | "metadata": {},
411 | "source": [
412 | "## 8. Deploy your own model\n",
413 | "\n",
414 | "Time to plug in your useful model! If you don't have a model that you'd like to try this with right now, we have sample code in [Examples](https://github.com/microsoft/AIforEarth-API-Development/tree/master/Examples) for PyTorch and TensorFlow (in addition, `animal-detector-api` is a model built with the TensorFlow Object Detection API) and instructions for downloading the required model files and sample data. \n",
415 | "\n",
416 | "You can now copy the `base-py` folder that we've built this example image with to your own repo, drop in your model file in `my-api`, and place your input handling and model inference code in `runserver.py` or another file it imports from. \n",
417 | "\n",
418 | "If you decide to change the name of the folder `my-api` or `runserver.py`, you also need to change the path to this entry point script in `supervisord.conf`."
419 | ]
420 | },
421 | {
422 | "cell_type": "markdown",
423 | "metadata": {},
424 | "source": [
425 | "## 9. Deploy on our scalable platform\n",
426 | "\n",
427 | "The Docker image you have created in this process, once integrated with your model files and inference code, can be deployed on our hosting platform with no additional changes. The local packages for task management and telemetry will be swapped with distributed versions.\n",
428 | "\n",
429 | "If your team would like to host the platform and make your APIs available that way, we will soon be publishing an [Azure Resource Manager](https://docs.microsoft.com/en-us/azure/azure-resource-manager/) template that describes all of our components so you could replicate easily."
430 | ]
431 | },
432 | {
433 | "cell_type": "markdown",
434 | "metadata": {},
435 | "source": [
436 | "## 10. Resource cleanup\n",
437 | "\n",
438 | "Don't forget to delete all the resources that you've set up to complete this lab afterwards, most importantly any VM or Azure Container Instances (ACI) instances, but also the Azure Container Registry (you can delete individual images stored there). You can do this in the Azure Portal or through the CLI."
439 | ]
440 | },
441 | {
442 | "cell_type": "code",
443 | "execution_count": null,
444 | "metadata": {
445 | "collapsed": true
446 | },
447 | "outputs": [],
448 | "source": []
449 | }
450 | ],
451 | "metadata": {
452 | "anaconda-cloud": {},
453 | "kernelspec": {
454 | "display_name": "Python [conda env:tensorflow]",
455 | "language": "python",
456 | "name": "conda-env-tensorflow-py"
457 | },
458 | "language_info": {
459 | "codemirror_mode": {
460 | "name": "ipython",
461 | "version": 3
462 | },
463 | "file_extension": ".py",
464 | "mimetype": "text/x-python",
465 | "name": "python",
466 | "nbconvert_exporter": "python",
467 | "pygments_lexer": "ipython3",
468 | "version": "3.5.4"
469 | }
470 | },
471 | "nbformat": 4,
472 | "nbformat_minor": 1
473 | }
474 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## Security
4 |
5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
6 |
7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
8 |
9 | ## Reporting Security Issues
10 |
11 | **Please do not report security vulnerabilities through public GitHub issues.**
12 |
13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
14 |
15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
16 |
17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
18 |
19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
20 |
21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
22 | * Full paths of source file(s) related to the manifestation of the issue
23 | * The location of the affected source code (tag/branch/commit or direct URL)
24 | * Any special configuration required to reproduce the issue
25 | * Step-by-step instructions to reproduce the issue
26 | * Proof-of-concept or exploit code (if possible)
27 | * Impact of the issue, including how an attacker might exploit the issue
28 |
29 | This information will help us triage your report more quickly.
30 |
31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
32 |
33 | ## Preferred Languages
34 |
35 | We prefer all communications to be in English.
36 |
37 | ## Policy
38 |
39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
40 |
41 |
42 |
--------------------------------------------------------------------------------