├── .github
│   ├── ISSUE_TEMPLATE.md
│   └── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── .travis.yml
├── CONTRIBUTING.md
├── LICENSE.md
├── README.md
├── example.py
├── requirements.txt
├── samples
│   ├── __init__.py
│   ├── anomaly_detector_samples.py
│   ├── csv_files
│   │   └── anomaly_detector_daily_series.csv
│   ├── face
│   │   ├── find_similar.py
│   │   └── identify.py
│   ├── knowledge
│   │   ├── __init__.py
│   │   └── qna_maker_samples.py
│   ├── language
│   │   ├── __init__.py
│   │   ├── luis
│   │   │   ├── LuisApp.json
│   │   │   ├── README.md
│   │   │   ├── images
│   │   │   │   ├── outcome.png
│   │   │   │   ├── prereqs-apikey.png
│   │   │   │   ├── prereqs-appid.png
│   │   │   │   ├── prereqs-import.png
│   │   │   │   └── programmatic-key.png
│   │   │   ├── luis_authoring_samples.py
│   │   │   └── luis_runtime_samples.py
│   │   └── spellcheck_samples.py
│   ├── search
│   │   ├── TestImages
│   │   │   └── image.jpg
│   │   ├── __init__.py
│   │   ├── autosuggest_samples.py
│   │   ├── custom_image_search_samples.py
│   │   ├── custom_search_samples.py
│   │   ├── entity_search_samples.py
│   │   ├── image-search-quickstart.py
│   │   ├── image_search_samples.py
│   │   ├── news_search_samples.py
│   │   ├── video_search_samples.py
│   │   ├── visual_search_samples.py
│   │   └── web_search_samples.py
│   ├── tools.py
│   └── vision
│       ├── __init__.py
│       ├── computer_vision_extract_text.py
│       ├── computer_vision_samples.py
│       ├── content_moderator_image_job_samples.py
│       ├── content_moderator_image_list_samples.py
│       ├── content_moderator_image_moderation_samples.py
│       ├── content_moderator_image_review.py
│       ├── content_moderator_term_list_samples.py
│       ├── content_moderator_text_moderation_samples.py
│       ├── content_moderator_video_analyze.py
│       ├── content_moderator_video_review.py
│       ├── content_moderator_video_transcript_review.py
│       ├── custom_vision_object_detection_sample.py
│       ├── custom_vision_prediction_samples.py
│       ├── custom_vision_training_multiclass_samples.py
│       ├── custom_vision_training_samples.py
│       ├── face_person_group_samples.py
│       ├── face_samples.py
│       ├── images
│       │   ├── Face
│       │   │   ├── Family1-Dad1.jpg
│       │   │   ├── Family1-Dad2.jpg
│       │   │   ├── Family1-Son1.jpg
│       │   │   ├── child1.jpg
│       │   │   ├── child2.jpg
│       │   │   ├── child3.jpg
│       │   │   ├── man1.jpg
│       │   │   ├── man2.jpg
│       │   │   ├── man3.jpg
│       │   │   ├── test-image.jpg
│       │   │   ├── woman1.jpg
│       │   │   ├── woman2.jpg
│       │   │   └── woman3.jpg
│       │   ├── Hemlock
│       │   │   ├── hemlock_1.jpg
│       │   │   ├── hemlock_10.jpg
│       │   │   ├── hemlock_2.jpg
│       │   │   ├── hemlock_3.jpg
│       │   │   ├── hemlock_4.jpg
│       │   │   ├── hemlock_5.jpg
│       │   │   ├── hemlock_6.jpg
│       │   │   ├── hemlock_7.jpg
│       │   │   ├── hemlock_8.jpg
│       │   │   └── hemlock_9.jpg
│       │   ├── Japanese Cherry
│       │   │   ├── japanese_cherry_1.jpg
│       │   │   ├── japanese_cherry_10.jpg
│       │   │   ├── japanese_cherry_2.jpg
│       │   │   ├── japanese_cherry_3.jpg
│       │   │   ├── japanese_cherry_4.jpg
│       │   │   ├── japanese_cherry_5.jpg
│       │   │   ├── japanese_cherry_6.jpg
│       │   │   ├── japanese_cherry_7.jpg
│       │   │   ├── japanese_cherry_8.jpg
│       │   │   └── japanese_cherry_9.jpg
│       │   ├── Test
│       │   │   ├── test_image.jpg
│       │   │   └── test_od_image.jpg
│       │   ├── computer_vision_ocr.png
│       │   ├── fork
│       │   │   ├── fork_1.jpg
│       │   │   ├── fork_10.jpg
│       │   │   ├── fork_11.jpg
│       │   │   ├── fork_12.jpg
│       │   │   ├── fork_13.jpg
│       │   │   ├── fork_14.jpg
│       │   │   ├── fork_15.jpg
│       │   │   ├── fork_16.jpg
│       │   │   ├── fork_17.jpg
│       │   │   ├── fork_18.jpg
│       │   │   ├── fork_19.jpg
│       │   │   ├── fork_2.jpg
│       │   │   ├── fork_20.jpg
│       │   │   ├── fork_3.jpg
│       │   │   ├── fork_4.jpg
│       │   │   ├── fork_5.jpg
│       │   │   ├── fork_6.jpg
│       │   │   ├── fork_7.jpg
│       │   │   ├── fork_8.jpg
│       │   │   └── fork_9.jpg
│       │   ├── house.jpg
│       │   ├── make_things_happen.jpg
│       │   └── scissors
│       │       ├── scissors_1.jpg
│       │       ├── scissors_10.jpg
│       │       ├── scissors_11.jpg
│       │       ├── scissors_12.jpg
│       │       ├── scissors_13.jpg
│       │       ├── scissors_14.jpg
│       │       ├── scissors_15.jpg
│       │       ├── scissors_16.jpg
│       │       ├── scissors_17.jpg
│       │       ├── scissors_18.jpg
│       │       ├── scissors_19.jpg
│       │       ├── scissors_2.jpg
│       │       ├── scissors_20.jpg
│       │       ├── scissors_3.jpg
│       │       ├── scissors_4.jpg
│       │       ├── scissors_5.jpg
│       │       ├── scissors_6.jpg
│       │       ├── scissors_7.jpg
│       │       ├── scissors_8.jpg
│       │       └── scissors_9.jpg
│       ├── inkrecognizer_sample.py
│       ├── ocr-data-safety
│       │   ├── OCR for data safety and content safety.ipynb
│       │   ├── diagram.jpg
│       │   └── example.png
│       └── text_files
│           ├── content_moderator_term_list.txt
│           ├── content_moderator_text_moderation.txt
│           └── content_moderator_video_transcript.txt
└── tests
    ├── README.md
    ├── recordings
    │   └── test_example.yaml
    ├── test-requirements.txt
    ├── test_example.py
    └── testsettings.cfg
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
4 | > Please provide us with the following information:
5 | > ---------------------------------------------------------------
6 |
7 | ### This issue is for a: (mark with an `x`)
8 | ```
9 | - [ ] bug report -> please search issues before submitting
10 | - [ ] feature request
11 | - [ ] documentation issue or request
12 | - [ ] regression (a behavior that used to work and stopped in a new release)
13 | ```
14 |
15 | ### Minimal steps to reproduce
16 | >
17 |
18 | ### Any log messages given by the failure
19 | >
20 |
21 | ### Expected/desired behavior
22 | >
23 |
24 | ### OS and Version?
25 | > Windows 7, 8 or 10. Linux (which distribution). macOS (Yosemite? El Capitan? Sierra?)
26 |
27 | ### Versions
28 | >
29 |
30 | ### Mention any other details that might be useful
31 |
32 | > ---------------------------------------------------------------
33 | > Thanks! We'll be in touch soon.
34 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Purpose
2 |
3 | * ...
4 |
5 | ## Does this introduce a breaking change?
6 |
7 | ```
8 | [ ] Yes
9 | [ ] No
10 | ```
11 |
12 | ## Pull Request Type
13 | What kind of change does this Pull Request introduce?
14 |
15 |
16 | ```
17 | [ ] Bugfix
18 | [ ] Feature
19 | [ ] Code style update (formatting, local variables)
20 | [ ] Refactoring (no functional changes, no api changes)
21 | [ ] Documentation content changes
22 | [ ] Other... Please describe:
23 | ```
24 |
25 | ## How to Test
26 | * Get the code
27 |
28 | ```
29 | git clone [repo-address]
30 | cd [repo-name]
31 | git checkout [branch-name]
32 | pip install -r requirements.txt
33 | ```
34 |
35 | * Test the code
36 |
37 | ```
38 | ```
39 |
40 | ## What to Check
41 | Verify that the following are valid
42 | * ...
43 |
44 | ## Other Information
45 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # SageMath parsed files
80 | *.sage.py
81 |
82 | # dotenv
83 | .env
84 |
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | # mkdocs documentation
98 | /site
99 |
100 | # mypy
101 | .mypy_cache/
102 |
103 | # idea
104 | .idea/
105 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: false
2 | language: python
3 | python:
4 | - "2.7"
5 | - "3.4"
6 | - "3.5"
7 | - "3.6"
8 | env:
9 | - AZURE_TEST_RUN_LIVE=true
10 | - AZURE_TEST_RUN_LIVE=false
11 | install:
12 | - pip install -U setuptools pip
13 | - pip install -r tests/test-requirements.txt
14 | script:
15 | - nosetests
16 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to cognitive-services-python-sdk-samples
2 |
3 | This project welcomes contributions and suggestions. Most contributions require you to agree to a
4 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
5 | the rights to use your contribution. For details, visit https://cla.microsoft.com.
6 |
7 | When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
8 | a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
9 | provided by the bot. You will only need to do this once across all repos using our CLA.
10 |
11 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
12 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
13 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
14 |
15 | - [Code of Conduct](#coc)
16 | - [Issues and Bugs](#issue)
17 | - [Feature Requests](#feature)
18 | - [Submission Guidelines](#submit)
19 |
20 | ## Code of Conduct
21 | Help us keep this project open and inclusive. Please read and follow our [Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
22 |
23 | ## Found an Issue?
24 | If you find a bug in the source code or a mistake in the documentation, you can help us by
25 | [submitting an issue](#submit-issue) to the GitHub Repository. Even better, you can
26 | [submit a Pull Request](#submit-pr) with a fix.
27 |
28 | ## Want a Feature?
29 | You can *request* a new feature by [submitting an issue](#submit-issue) to the GitHub
30 | Repository. If you would like to *implement* a new feature, please submit an issue with
31 | a proposal for your work first, to be sure that we can use it.
32 |
33 | * **Small Features** can be crafted and directly [submitted as a Pull Request](#submit-pr).
34 |
35 | ## Submission Guidelines
36 |
37 | ### Submitting an Issue
38 | Before you submit an issue, search the archive; your question may already have been answered.
39 |
40 | If your issue appears to be a bug, and hasn't been reported, open a new issue.
41 | Help us to maximize the effort we can spend fixing issues and adding new
42 | features, by not reporting duplicate issues. Providing the following information will increase the
43 | chances of your issue being dealt with quickly:
44 |
45 | * **Overview of the Issue** - if an error is being thrown, a non-minified stack trace helps
46 | * **Version** - what version is affected (e.g. 0.1.2)
47 | * **Motivation for or Use Case** - explain what are you trying to do and why the current behavior is a bug for you
48 | * **Browsers and Operating System** - is this a problem with all browsers?
49 | * **Reproduce the Error** - provide a live example or an unambiguous set of steps
50 | * **Related Issues** - has a similar issue been reported before?
51 | * **Suggest a Fix** - if you can't fix the bug yourself, perhaps you can point to what might be
52 | causing the problem (line of code or commit)
53 |
54 | You can file new issues by providing the above information at the corresponding repository's issues link: https://github.com/Azure-Samples/cognitive-services-python-sdk-samples/issues/new.
55 |
56 | ### Submitting a Pull Request (PR)
57 | Before you submit your Pull Request (PR) consider the following guidelines:
58 |
59 | * Search the repository (https://github.com/Azure-Samples/cognitive-services-python-sdk-samples/pulls) for an open or closed PR
60 | that relates to your submission. You don't want to duplicate effort.
61 |
62 | * Make your changes in a new git fork:
63 |
64 | * Commit your changes using a descriptive commit message
65 | * Push your fork to GitHub:
66 | * In GitHub, create a pull request
67 | * If we suggest changes then:
68 | * Make the required updates.
69 | * Rebase your fork and force push to your GitHub repository (this will update your Pull Request):
70 |
71 | ```shell
72 | git rebase master -i
73 | git push -f
74 | ```
75 |
76 | That's it! Thank you for your contribution!
77 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Microsoft Corporation. All rights reserved.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | page_type: sample
3 | languages:
4 | - python
5 | products:
6 | - azure
7 | description: "These samples will show you how to get up and running using the Python SDKs for various Cognitive Services services."
8 | urlFragment: cognitive-services-python-sdk-samples
9 | ---
10 |
11 | # Cognitive Services Python SDK Samples
12 |
13 | These samples will show you how to get up and running using the Python SDKs for various Cognitive Services services. They'll cover a few rudimentary use cases and hopefully express best practices for interacting with the data from these APIs.
14 |
15 | ## Features
16 |
17 | This project framework provides examples for the following services:
18 |
19 | ### Knowledge
20 | * Using the **QnA SDK** [azure-cognitiveservices-knowledge-qnamaker](http://pypi.python.org/pypi/azure-cognitiveservices-knowledge-qnamaker) for the [QnA API](https://azure.microsoft.com/en-us/services/cognitive-services/qna-maker/)
21 |
22 |
23 | ### Language
24 |
25 | * Using the **LUIS SDK** [azure-cognitiveservices-language-luis](http://pypi.python.org/pypi/azure-cognitiveservices-language-luis) for the [LUIS API](https://azure.microsoft.com/services/cognitive-services/language-understanding-intelligent-service/)
26 | * Using the **Bing Spell Check SDK** [azure-cognitiveservices-language-spellcheck](http://pypi.python.org/pypi/azure-cognitiveservices-language-spellcheck) for the [Bing Spell Check API](https://azure.microsoft.com/services/cognitive-services/spell-check/)
27 | * Using the **Text Analytics SDK** [azure-cognitiveservices-language-textanalytics](http://pypi.python.org/pypi/azure-cognitiveservices-language-textanalytics) for the [Text Analytics API](https://azure.microsoft.com/services/cognitive-services/text-analytics/)
28 |
29 | ### Search
30 |
31 | * Using the **Bing Autosuggest SDK** [azure-cognitiveservices-search-autosuggest](http://pypi.python.org/pypi/azure-cognitiveservices-search-autosuggest) for the [Autosuggest API](https://azure.microsoft.com/services/cognitive-services/autosuggest/)
32 | * Using the **Bing Custom Search SDK** [azure-cognitiveservices-search-customsearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-customsearch) for the [Custom Search API](https://azure.microsoft.com/services/cognitive-services/bing-custom-search/)
33 | * Using the **Bing Custom Image Search SDK** [azure-cognitiveservices-search-customimagesearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-customimagesearch) for the [Custom Image Search API](https://azure.microsoft.com/services/cognitive-services/bing-custom-search/)
34 | * Using the **Bing Entity Search SDK** [azure-cognitiveservices-search-entitysearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-entitysearch) for the [Entity Search API](https://azure.microsoft.com/services/cognitive-services/bing-entity-search-api/)
35 | * Using the **Bing Image Search SDK** [azure-cognitiveservices-search-imagesearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-imagesearch) for the [Image Search API](https://azure.microsoft.com/services/cognitive-services/bing-image-search-api/)
36 | * Using the **Bing News Search SDK** [azure-cognitiveservices-search-newssearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-newssearch) for the [News Search API](https://azure.microsoft.com/services/cognitive-services/bing-news-search-api/)
37 | * Using the **Bing Video Search SDK** [azure-cognitiveservices-search-videosearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-videosearch) for the [Video Search API](https://azure.microsoft.com/services/cognitive-services/bing-video-search-api/)
38 | * Using the **Bing Visual Search SDK** [azure-cognitiveservices-search-visualsearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-visualsearch) for the [Visual Search API](https://azure.microsoft.com/services/cognitive-services/bing-visual-search-api/)
39 | * Using the **Bing Web Search SDK** [azure-cognitiveservices-search-websearch](http://pypi.python.org/pypi/azure-cognitiveservices-search-websearch) for the [Web Search API](https://azure.microsoft.com/services/cognitive-services/bing-web-search-api/)
40 |
41 | ### Vision
42 |
43 | * Using the **Face SDK** [azure-cognitiveservices-vision-face](http://pypi.python.org/pypi/azure-cognitiveservices-vision-face) for the [Face API](https://azure.microsoft.com/services/cognitive-services/face/)
44 | * Using the **Computer Vision SDK** [azure-cognitiveservices-vision-computervision](http://pypi.python.org/pypi/azure-cognitiveservices-vision-computervision) for the [Computer Vision API](https://azure.microsoft.com/services/cognitive-services/computer-vision/)
45 | * Using the **Content Moderator SDK** [azure-cognitiveservices-vision-contentmoderator](http://pypi.python.org/pypi/azure-cognitiveservices-vision-contentmoderator) for the [Content Moderator API](https://azure.microsoft.com/services/cognitive-services/content-moderator/)
46 | * Using the **Custom Vision SDK** [azure-cognitiveservices-vision-customvision](http://pypi.python.org/pypi/azure-cognitiveservices-vision-customvision) for the [Custom Vision API](https://azure.microsoft.com/services/cognitive-services/custom-vision-service/)
47 | * Using the **Ink Recognizer SDK** [azure-cognitiveservices-inkrecognizer](https://pypi.org/project/azure-cognitiveservices-inkrecognizer/) for the [Ink Recognizer API](https://azure.microsoft.com/services/cognitive-services/ink-recognizer/)
48 |
49 | We provide several meta-packages to help you install several packages at a time. Please note that meta-packages are only recommended for development purposes. In production, it's recommended to always pin the specific version of each individual package, as shown below.
50 |
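51 | For example, pinning an individual package in your own requirements.txt looks like this (the version shown is illustrative, not a recommendation):
52 | 
53 | ```
54 | azure-cognitiveservices-language-luis==0.7.0
55 | ```
56 | 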
51 | ## Getting Started
52 |
53 | ### Prerequisites
54 |
55 | 1. A cognitive services API key with which to authenticate the SDK's calls. [Create a new Azure account, and try Cognitive Services for free.](https://azure.microsoft.com/free/cognitive-services/)
56 |
57 | > Subscription keys are usually per service. For example, the subscription key for Spell Check will not be the same as the one for Custom Search. Read the previous *sign up* link or the Azure portal for details on subscription keys.
58 |
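59 | Each sample reads its key from an environment variable before building its client. As a minimal sketch, mirroring the Face samples in this repo (the region in the endpoint and the `FACE_SUBSCRIPTION_KEY` name are placeholders; each sample documents its own variable):
60 | 
61 | ```python
62 | import os
63 | 
64 | from azure.cognitiveservices.vision.face import FaceClient
65 | from msrest.authentication import CognitiveServicesCredentials
66 | 
67 | # Placeholder variable name; see the Quickstart below for the per-service names.
68 | face_base_url = "https://westcentralus.api.cognitive.microsoft.com"
69 | face_client = FaceClient(face_base_url, CognitiveServicesCredentials(os.environ["FACE_SUBSCRIPTION_KEY"]))
70 | ```
71 | 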
59 | ### Installation
60 |
61 | 1. If you don't already have it, [install Python](https://www.python.org/downloads/).
62 |
63 | This sample (and the SDK) is compatible with Python 2.7, 3.3, 3.4, 3.5 and 3.6.
64 |
65 | 2. The general recommendation for Python development is to use a virtual environment.
66 |    For more information, see https://docs.python.org/3/tutorial/venv.html
67 |
68 | Install and initialize the virtual environment with the "venv" module on Python 3 (you must install [virtualenv](https://pypi.python.org/pypi/virtualenv) for Python 2.7):
69 |
70 | ```
71 | python -m venv mytestenv # Might be "python3" or "py -3.6" depending on your Python installation
72 | cd mytestenv
73 | source bin/activate # Linux shell (Bash, ZSH, etc.) only
74 | ./scripts/activate # PowerShell only
75 | ./scripts/activate.bat # Windows CMD only
76 | ```
77 |
78 | ### Quickstart
79 |
80 | 1. Clone the repository.
81 |
82 | ```
83 | git clone https://github.com/Azure-Samples/cognitive-services-python-sdk-samples.git
84 | ```
85 |
86 | 2. Install the dependencies using pip.
87 |
88 | ```
89 | cd cognitive-services-python-sdk-samples
90 | pip install -r requirements.txt
91 | ```
92 |
93 | 3. Set up the environment variable `LUIS_SUBSCRIPTION_KEY` with your key if you want to execute LUIS tests.
94 | 4. Set up the environment variable `SPELLCHECK_SUBSCRIPTION_KEY` with your key if you want to execute SpellCheck tests.
95 | 5. Set up the environment variable `TEXTANALYTICS_SUBSCRIPTION_KEY` with your key if you want to execute TextAnalytics tests. You might also override `TEXTANALYTICS_LOCATION` (westcentralus by default).
96 | 6. Set up the environment variable `AUTOSUGGEST_SUBSCRIPTION_KEY` with your key if you want to execute Autosuggest tests.
97 | 7. Set up the environment variable `CUSTOMSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute CustomSearch tests.
98 | 8. Set up the environment variable `CUSTOMIMAGESEARCH_SUBSCRIPTION_KEY` with your key if you want to execute CustomImageSearch tests.
99 | 9. Set up the environment variable `ENTITYSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute EntitySearch tests.
100 | 10. Set up the environment variable `IMAGESEARCH_SUBSCRIPTION_KEY` with your key if you want to execute ImageSearch tests.
101 | 11. Set up the environment variable `NEWSSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute NewsSearch tests.
102 | 12. Set up the environment variable `VIDEOSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute VideoSearch tests.
103 | 13. Set up the environment variable `VISUALSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute VisualSearch tests.
104 | 14. Set up the environment variable `WEBSEARCH_SUBSCRIPTION_KEY` with your key if you want to execute WebSearch tests.
105 | 15. Set up the environment variable `COMPUTERVISION_SUBSCRIPTION_KEY` with your key if you want to execute Computer Vision tests. You might also override `COMPUTERVISION_LOCATION` (westcentralus by default).
106 | 16. Set up the environment variable `CONTENTMODERATOR_SUBSCRIPTION_KEY` with your key if you want to execute Content Moderator tests. You might also override `CONTENTMODERATOR_LOCATION` (westcentralus by default).
107 | 17. Set up the environment variable `CUSTOMVISION_TRAINING_KEY` with your key and `CUSTOMVISION_PREDICTION_ID` with a valid prediction resource id if you want to execute CustomVision Training tests.
108 | 18. Set up the environment variable `CUSTOMVISION_PREDICTION_KEY` with your key and `CUSTOMVISION_PREDICTION_ID` with a valid prediction resource id if you want to execute CustomVision Prediction tests.
109 |
110 |
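111 | For example, on Linux/macOS (Bash) you can set one of these keys for the current shell session; use the variable name for the service you want to run:
112 | 
113 | ```
114 | export LUIS_SUBSCRIPTION_KEY="<your key>"
115 | ```
116 | 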
111 | ## Demo
112 |
113 | A demo app is included to show how to use the project.
114 |
115 | To run the complete demo, execute `python example.py`
116 |
117 | To run each individual demo, point directly to the file. For example (not a complete list):
118 |
119 | 1. `python samples/language/spellcheck_samples.py`
120 | 2. `python samples/search/entity_search_samples.py`
121 | 3. `python samples/search/video_search_samples.py`
122 | 4. `python samples/vision/inkrecognizer_sample.py`
123 |
124 | To see the code of each example, simply look at the examples in the Samples folder. They are written to be isolated in scope so that you can see only what you're interested in.
125 |
126 | ## Resources
127 |
128 | - https://docs.microsoft.com/python/api/overview/azure/cognitive-services
129 | - https://github.com/Azure/azure-sdk-for-python
130 |
--------------------------------------------------------------------------------
/example.py:
--------------------------------------------------------------------------------
1 | """Sample launcher.
2 |
3 | This file is just the samples launcher. Nothing here is related
4 | to Cognitive Services. Look into the "samples" folder for the actual code.
5 | """
6 |
7 | import importlib
8 | import pkgutil
9 |
10 | # import logging
11 | # logging.basicConfig(level=logging.DEBUG)
12 |
13 | import samples.tools
14 |
15 |
16 | def run_all_samples():
17 |     for _, section_name, ispkg in pkgutil.walk_packages(samples.__path__):
18 | if not ispkg:
19 | continue
20 |         section_package_name = "samples." + section_name
21 | section_package = importlib.import_module(section_package_name)
22 | for _, sample_name, _ in pkgutil.iter_modules(section_package.__path__):
23 | sample_module = importlib.import_module(
24 | section_package_name+"."+sample_name)
25 | subkey_env_name = getattr(
26 | sample_module, "SUBSCRIPTION_KEY_ENV_NAME", None)
27 | if not subkey_env_name:
28 | continue
29 | print("Executing sample from ", sample_name)
30 | try:
31 | samples.tools.execute_samples(
32 | sample_module.__dict__, subkey_env_name)
33 | except samples.tools.SubscriptionKeyError as err:
34 | print("{}\n".format(err))
35 |
36 |
37 | if __name__ == "__main__":
38 | run_all_samples()
39 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | azure-cognitiveservices-knowledge-qnamaker==0.3.0
2 | azure-cognitiveservices-language-luis
3 | azure-cognitiveservices-language-spellcheck
4 | azure-ai-textanalytics==5.0.0
5 | azure-cognitiveservices-search-autosuggest==0.1.0
6 | azure-cognitiveservices-search-customsearch
7 | azure-cognitiveservices-search-entitysearch
8 | azure-cognitiveservices-search-imagesearch
9 | azure-cognitiveservices-search-newssearch==2.0.0
10 | azure-cognitiveservices-search-videosearch
11 | azure-cognitiveservices-search-visualsearch==0.2.0 # sample won't work with previous versions
12 | azure-cognitiveservices-search-websearch
13 | azure-cognitiveservices-vision-computervision==0.3.0 # sample won't work with previous versions
14 | azure-cognitiveservices-vision-contentmoderator==1.0.0 # sample won't work with previous versions
15 | azure-cognitiveservices-vision-customvision==0.4.0 # sample won't work with previous versions
16 | azure-cognitiveservices-vision-face
17 | azure-ai-anomalydetector==3.0.0b2 # sample won't work with previous versions
18 | azure-cognitiveservices-inkrecognizer==1.0.0b1
19 | pandas
20 |
--------------------------------------------------------------------------------
/samples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/__init__.py
--------------------------------------------------------------------------------
/samples/anomaly_detector_samples.py:
--------------------------------------------------------------------------------
1 | from azure.ai.anomalydetector import AnomalyDetectorClient
2 | from azure.ai.anomalydetector.models import DetectRequest, TimeSeriesPoint, TimeGranularity, \
3 | AnomalyDetectorError
4 | from azure.core.credentials import AzureKeyCredential
5 | import pandas as pd
6 | import os
7 |
8 |
9 | # Add your Azure Anomaly Detector subscription key to your environment variables.
10 | SUBSCRIPTION_KEY = os.environ["ANOMALY_DETECTOR_SUBSCRIPTION_KEY"]
11 |
12 | CSV_FOLDER = os.path.join(os.path.dirname(
13 | os.path.realpath(__file__)), "csv_files")
14 |
15 |
16 | def get_series_from_file(path):
17 | df = pd.read_csv(path, header=None, encoding="utf-8", parse_dates=[0])
18 | series = []
19 | for index, row in df.iterrows():
20 | series.append(TimeSeriesPoint(timestamp=row[0], value=row[1]))
21 | return series
22 |
23 |
24 | def get_request():
25 | series = get_series_from_file(os.path.join(
26 | CSV_FOLDER, "anomaly_detector_daily_series.csv"))
27 | return DetectRequest(series=series, granularity=TimeGranularity.daily)
28 |
29 |
30 | def entire_detect(subscription_key):
31 | print("Sample of detecting anomalies in the entire series.")
32 | # Add your Azure Anomaly Detector subscription key to your environment variables.
33 | endpoint = os.environ["ANOMALY_DETECTOR_ENDPOINT"]
34 |
35 | client = AnomalyDetectorClient(AzureKeyCredential(subscription_key), endpoint)
36 | request = get_request()
37 |
38 |     try:
39 |         response = client.detect_entire_series(request)
40 |     except AnomalyDetectorError as e:
41 |         print('Error code: {}'.format(e.error.code), 'Error message: {}'.format(e.error.message))
42 |         return
43 |     except Exception as e:
44 |         print(e)
45 |         return
46 |     if any(response.is_anomaly):
47 |         print('Anomaly was detected from the series at index:')
48 |         for i, value in enumerate(response.is_anomaly):
49 |             if value:
50 |                 print(i)
51 |     else:
52 |         print('No anomalies were detected in the time series.')
53 | 
54 | def last_detect(subscription_key):
55 |     print("Sample of detecting whether the latest point in the series is an anomaly.")
56 | # Add your Azure Anomaly Detector subscription key to your environment variables.
57 | endpoint = os.environ["ANOMALY_DETECTOR_ENDPOINT"]
58 |
59 | client = AnomalyDetectorClient(AzureKeyCredential(subscription_key), endpoint)
60 | request = get_request()
61 |
62 |     try:
63 |         response = client.detect_last_point(request)
64 |     except AnomalyDetectorError as e:
65 |         print('Error code: {}'.format(e.error.code), 'Error message: {}'.format(e.error.message))
66 |         return
67 |     except Exception as e:
68 |         print(e)
69 |         return
70 |     if response.is_anomaly:
71 |         print('The latest point is detected as an anomaly.')
72 |     else:
73 |         print('The latest point is not detected as an anomaly.')
74 |
75 | if __name__ == "__main__":
76 | import sys
77 | import os.path
78 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..")))
79 | from tools import execute_samples
80 | execute_samples(globals(), SUBSCRIPTION_KEY)
81 |
--------------------------------------------------------------------------------
/samples/csv_files/anomaly_detector_daily_series.csv:
--------------------------------------------------------------------------------
1 | 2018-03-01T00:00:00Z,32858923
2 | 2018-03-02T00:00:00Z,29615278
3 | 2018-03-03T00:00:00Z,22839355
4 | 2018-03-04T00:00:00Z,25948736
5 | 2018-03-05T00:00:00Z,34139159
6 | 2018-03-06T00:00:00Z,33843985
7 | 2018-03-07T00:00:00Z,33637661
8 | 2018-03-08T00:00:00Z,32627350
9 | 2018-03-09T00:00:00Z,29881076
10 | 2018-03-10T00:00:00Z,22681575
11 | 2018-03-11T00:00:00Z,24629393
12 | 2018-03-12T00:00:00Z,34010679
13 | 2018-03-13T00:00:00Z,33893888
14 | 2018-03-14T00:00:00Z,33760076
15 | 2018-03-15T00:00:00Z,33093515
16 | 2018-03-16T00:00:00Z,29945555
17 | 2018-03-17T00:00:00Z,22676212
18 | 2018-03-18T00:00:00Z,25262514
19 | 2018-03-19T00:00:00Z,33631649
20 | 2018-03-20T00:00:00Z,34468310
21 | 2018-03-21T00:00:00Z,34212281
22 | 2018-03-22T00:00:00Z,38144434
23 | 2018-03-23T00:00:00Z,34662949
24 | 2018-03-24T00:00:00Z,24623684
25 | 2018-03-25T00:00:00Z,26530491
26 | 2018-03-26T00:00:00Z,35445003
27 | 2018-03-27T00:00:00Z,34250789
28 | 2018-03-28T00:00:00Z,33423012
29 | 2018-03-29T00:00:00Z,30744783
30 | 2018-03-30T00:00:00Z,25825128
31 | 2018-03-31T00:00:00Z,21244209
32 | 2018-04-01T00:00:00Z,22576956
33 | 2018-04-02T00:00:00Z,31957221
34 | 2018-04-03T00:00:00Z,33841228
35 | 2018-04-04T00:00:00Z,33554483
36 | 2018-04-05T00:00:00Z,32383350
37 | 2018-04-06T00:00:00Z,29494850
38 | 2018-04-07T00:00:00Z,22815534
39 | 2018-04-08T00:00:00Z,25557267
40 | 2018-04-09T00:00:00Z,34858252
41 | 2018-04-10T00:00:00Z,34750597
42 | 2018-04-11T00:00:00Z,34717956
43 | 2018-04-12T00:00:00Z,34132534
44 | 2018-04-13T00:00:00Z,30762236
45 | 2018-04-14T00:00:00Z,22504059
46 | 2018-04-15T00:00:00Z,26149060
47 | 2018-04-16T00:00:00Z,35250105
--------------------------------------------------------------------------------
/samples/face/find_similar.py:
--------------------------------------------------------------------------------
1 | import os
2 | import uuid
3 | import time
4 |
5 | from azure.cognitiveservices.vision.face import FaceClient
6 | from msrest.authentication import CognitiveServicesCredentials
7 | from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person
8 |
9 | # NOTE: Replace this with a valid Face subscription key.
10 | SUBSCRIPTION_KEY = "INSERT KEY HERE"
11 |
12 | # You must use the same region as you used to get your subscription
13 | # keys. For example, if you got your subscription keys from westus,
14 | # replace "westcentralus" with "westus".
15 | FACE_LOCATION = "westcentralus"
16 |
17 | face_base_url = "https://{}.api.cognitive.microsoft.com".format(FACE_LOCATION)
18 | face_client = FaceClient(face_base_url, CognitiveServicesCredentials(SUBSCRIPTION_KEY))
19 |
20 | # This image should contain a single face.
21 | remote_image_URL_1 = "https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg"
22 |
23 | # This image should contain several faces, at least one of which is similar to the face in remote_image_URL_1.
24 | remote_image_URL_2 = "https://www.biography.com/.image/t_share/MTQ1NDY3OTIxMzExNzM3NjE3/john-f-kennedy---debating-richard-nixon.jpg"
25 |
26 | # Detect faces in a remote image.
27 | def detect_faces(face_client, image_url):
28 | print ("Detecting faces...")
29 | detected_faces = face_client.face.detect_with_url(url=image_url)
30 | if not detected_faces:
31 | raise Exception('No face detected from image {}'.format(image_url))
32 | if not detected_faces[0]:
33 |         raise Exception("Parameter return_face_id of detect_with_stream or detect_with_url must be set to true (by default) for recognition purposes.")
34 | return detected_faces
35 |
36 | # Find similar faces to @face_ID in @face_IDs.
37 | def find_similar_faces(face_client, face_ID, face_IDs):
38 | print("Finding similar faces ...")
39 | return face_client.face.find_similar(face_id=face_ID, face_ids=face_IDs)
40 |
41 | # Detect a face in the first image.
42 | faces_1 = detect_faces(face_client, remote_image_URL_1)
43 | if not faces_1[0]:
44 | print("No faces detected in " + remote_image_URL_1 + ".")
45 | else:
46 | print("Face IDs of faces detected in " + remote_image_URL_1 + ":")
47 | for x in faces_1: print (x.face_id)
48 |
49 | print("Using first face ID.")
50 | face_ID = faces_1[0].face_id
51 |
52 | # Detect a list of faces in the second image.
53 | faces_2 = detect_faces(face_client, remote_image_URL_2)
54 | if not faces_2[0]:
55 | print("No faces detected in " + remote_image_URL_2 + ".")
56 | else:
57 | print("Face IDs of faces detected in " + remote_image_URL_2 + ":")
58 | for x in faces_2: print (x.face_id)
59 |
60 | # Search the faces detected in the second image to find a similar face to the first one.
61 | similar_faces = find_similar_faces(face_client, face_ID, list(map(lambda x: x.face_id, faces_2)))
62 | if not similar_faces[0]:
63 | print("No similar faces found in " + remote_image_URL_2 + ".")
64 | else:
65 | print("Similar faces found in " + remote_image_URL_2 + ":")
66 | for face in similar_faces:
67 | face_ID = face.face_id
68 | # SimilarFace only contains a Face ID, Persisted Face ID, and confidence score.
69 | # So we look up the Face ID in the list of DetectedFaces found in
70 | # remote_image_URL_2 to get the rest of the face information.
71 | face_info = next(x for x in faces_2 if x.face_id == face_ID)
72 | if face_info:
73 | print("Face ID: " + face_ID)
74 | print("Face rectangle:")
75 | print("Left: " + str(face_info.face_rectangle.left))
76 | print("Top: " + str(face_info.face_rectangle.top))
77 | print("Width: " + str(face_info.face_rectangle.width))
78 | print("Height: " + str(face_info.face_rectangle.height))
79 |
--------------------------------------------------------------------------------
/samples/face/identify.py:
--------------------------------------------------------------------------------
1 | import os
2 | import uuid
3 | import time
4 |
5 | from azure.cognitiveservices.vision.face import FaceClient
6 | from msrest.authentication import CognitiveServicesCredentials
7 | from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person
8 |
9 | # NOTE: Replace this with a valid Face subscription key.
10 | SUBSCRIPTION_KEY = "INSERT KEY HERE"
11 |
12 | # You must use the same region as you used to get your subscription
13 | # keys. For example, if you got your subscription keys from westus,
14 | # replace "westcentralus" with "westus".
15 | FACE_LOCATION = "westcentralus"
16 |
17 | face_base_url = "https://{}.api.cognitive.microsoft.com".format(FACE_LOCATION)
18 | face_client = FaceClient(face_base_url, CognitiveServicesCredentials(SUBSCRIPTION_KEY))
19 |
20 | # This image should contain a single face.
21 | remote_image_URL_1 = "https://www.biography.com/.image/t_share/MTQ1MzAyNzYzOTgxNTE0NTEz/john-f-kennedy---mini-biography.jpg"
22 |
23 | # This image should contain several faces, at least one of which is similar to the face in remote_image_URL_1.
24 | remote_image_URL_2 = "https://www.biography.com/.image/t_share/MTQ1NDY3OTIxMzExNzM3NjE3/john-f-kennedy---debating-richard-nixon.jpg"
25 |
26 | # Detect faces in a remote image.
27 | def detect_faces(face_client, image_url):
28 | print ("Detecting faces...")
29 | detected_faces = face_client.face.detect_with_url(url=image_url)
30 | if not detected_faces:
31 | raise Exception('No face detected from image {}'.format(image_url))
32 | if not detected_faces[0]:
33 |         raise Exception("Parameter return_face_id of detect_with_stream or detect_with_url must be set to true (by default) for recognition purposes.")
34 | return detected_faces
35 |
36 | # Find similar faces to @face_ID in @face_IDs.
37 | def find_similar_faces(face_client, face_ID, face_IDs):
38 | print("Finding similar faces ...")
39 | return face_client.face.find_similar(face_id=face_ID, face_ids=face_IDs)
40 |
41 | # Detect a face in the first image.
42 | faces_1 = detect_faces(face_client, remote_image_URL_1)
43 | if not faces_1[0]:
44 | print("No faces detected in " + remote_image_URL_1 + ".")
45 | else:
46 | print("Face IDs of faces detected in " + remote_image_URL_1 + ":")
47 | for x in faces_1: print (x.face_id)
48 |
49 | print("Using first face ID.")
50 | face_ID = faces_1[0].face_id
51 |
52 | # Detect a list of faces in the second image.
53 | faces_2 = detect_faces(face_client, remote_image_URL_2)
54 | if not faces_2[0]:
55 | print("No faces detected in " + remote_image_URL_2 + ".")
56 | else:
57 | print("Face IDs of faces detected in " + remote_image_URL_2 + ":")
58 | for x in faces_2: print (x.face_id)
59 |
60 | # Search the faces detected in the second image to find a similar face to the first one.
61 | similar_faces = find_similar_faces(face_client, face_ID, list(map(lambda x: x.face_id, faces_2)))
62 | if not similar_faces[0]:
63 | print("No similar faces found in " + remote_image_URL_2 + ".")
64 | else:
65 | print("Similar faces found in " + remote_image_URL_2 + ":")
66 | for face in similar_faces:
67 | face_ID = face.face_id
68 | # SimilarFace only contains a Face ID, Persisted Face ID, and confidence score.
69 | # So we look up the Face ID in the list of DetectedFaces found in
70 | # remote_image_URL_2 to get the rest of the face information.
71 | face_info = next(x for x in faces_2 if x.face_id == face_ID)
72 | if face_info:
73 | print("Face ID: " + face_ID)
74 | print("Face rectangle:")
75 | print("Left: " + str(face_info.face_rectangle.left))
76 | print("Top: " + str(face_info.face_rectangle.top))
77 | print("Width: " + str(face_info.face_rectangle.width))
78 | print("Height: " + str(face_info.face_rectangle.height))
79 |
--------------------------------------------------------------------------------
/samples/knowledge/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/knowledge/__init__.py
--------------------------------------------------------------------------------
/samples/knowledge/qna_maker_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | from azure.cognitiveservices.knowledge.qnamaker import QnAMakerClient
5 | from azure.cognitiveservices.knowledge.qnamaker.models import QnADTO, MetadataDTO, CreateKbDTO, OperationStateType, UpdateKbOperationDTO, UpdateKbOperationDTOAdd
6 | from msrest.authentication import CognitiveServicesCredentials
7 |
8 | # Add your QnaMaker subscription key and endpoint to your environment variables.
9 | SUBSCRIPTION_KEY = os.environ['QNA_MAKER_SUBSCRIPTION_KEY']
10 | QNA_ENDPOINT = os.environ['QNA_MAKER_ENDPOINT']
11 |
12 |
13 | def knowledge_based_crud_sample(subscription_key):
14 | """KnowledgeBasedCRUDSample.
15 |
16 | This will create, update, publish, download, then delete a knowledge base.
17 | """
18 | def _create_sample_kb(client):
19 | """Helper function for knowledge_based_crud_sample.
20 |
21 | This helper function takes in a QnAMakerClient and returns an operation of a created knowledge base.
22 | """
23 | qna = QnADTO(
24 | answer="You can use our REST APIs to manage your knowledge base.",
25 | questions=["How do I manage my knowledgebase?"],
26 | metadata=[MetadataDTO(name="Category", value="api")]
27 | )
28 | urls = [
29 | "https://docs.microsoft.com/en-in/azure/cognitive-services/qnamaker/faqs"]
30 | create_kb_dto = CreateKbDTO(
31 | name="QnA Maker FAQ from quickstart",
32 | qna_list=[qna],
33 | urls=urls
34 | )
35 | create_op = client.knowledgebase.create(
36 | create_kb_payload=create_kb_dto)
37 | create_op = _monitor_operation(client=client, operation=create_op)
38 | return create_op.resource_location.replace("/knowledgebases/", "")
39 |
40 | def _monitor_operation(client, operation):
41 | """Helper function for knowledge_based_crud_sample.
42 |
43 | This helper function takes in a QnAMakerClient and an operation, and loops until the operation has either succeeded
44 | or failed and returns the operation.
45 | """
46 | for i in range(20):
47 | if operation.operation_state in [OperationStateType.not_started, OperationStateType.running]:
48 | print("Waiting for operation: {} to complete.".format(
49 | operation.operation_id))
50 | time.sleep(5)
51 | operation = client.operations.get_details(
52 | operation_id=operation.operation_id)
53 | else:
54 | break
55 | if operation.operation_state != OperationStateType.succeeded:
56 | raise Exception("Operation {} failed to complete.".format(
57 | operation.operation_id))
58 | return operation
59 |
60 | client = QnAMakerClient(endpoint=QNA_ENDPOINT, credentials=CognitiveServicesCredentials(subscription_key))
61 |
62 | # Create a KB
63 | print("Creating KB...")
64 | kb_id = _create_sample_kb(client=client)
65 | print("Created KB with ID: {}".format(kb_id))
66 |
67 | # Update the KB
68 | print("Updating KB...")
69 | update_kb_operation_dto = UpdateKbOperationDTO(
70 | add=UpdateKbOperationDTOAdd(
71 | qna_list=[
72 | QnADTO(questions=["bye"], answer="goodbye")
73 | ]
74 | )
75 | )
76 | update_op = client.knowledgebase.update(
77 | kb_id=kb_id, update_kb=update_kb_operation_dto)
78 | _monitor_operation(client=client, operation=update_op)
79 |
80 | # Publish the KB
81 | print("Publishing KB...")
82 | client.knowledgebase.publish(kb_id=kb_id)
83 | print("KB Published.")
84 |
85 | # Download the KB
86 | print("Downloading KB...")
87 | kb_data = client.knowledgebase.download(kb_id=kb_id, environment="Prod")
88 | print("KB Downloaded. It has {} QnAs.".format(len(kb_data.qna_documents)))
89 |
90 | # Delete the KB
91 | print("Deleting KB...")
92 | client.knowledgebase.delete(kb_id=kb_id)
93 | print("KB Deleted.")
94 |
95 | if __name__ == "__main__":
96 |     knowledge_based_crud_sample(SUBSCRIPTION_KEY)
--------------------------------------------------------------------------------
/samples/language/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/language/__init__.py
--------------------------------------------------------------------------------
/samples/language/luis/LuisApp.json:
--------------------------------------------------------------------------------
1 | {
2 | "luis_schema_version": "2.1.0",
3 | "versionId": "0.1",
4 | "name": "LuisBot",
5 | "desc": "",
6 | "culture": "en-us",
7 | "intents": [
8 | {
9 | "name": "dateintent"
10 | },
11 | {
12 | "name": "Help"
13 | },
14 | {
15 | "name": "None"
16 | },
17 | {
18 | "name": "SearchHotels"
19 | },
20 | {
21 | "name": "ShowHotelsReviews"
22 | }
23 | ],
24 | "entities": [
25 | {
26 | "name": "AirportCode"
27 | },
28 | {
29 | "name": "Hotel"
30 | }
31 | ],
32 | "composites": [],
33 | "closedLists": [],
34 | "bing_entities": [
35 | "datetimeV2"
36 | ],
37 | "model_features": [
38 | {
39 | "name": "Near",
40 | "mode": true,
41 | "words": "near,around,close,nearby",
42 | "activated": true
43 | },
44 | {
45 | "name": "Show",
46 | "mode": true,
47 | "words": "show,find,look,search",
48 | "activated": true
49 | }
50 | ],
51 | "regex_features": [
52 | {
53 | "name": "AirportCodeRegex",
54 | "pattern": "[a-z]{3}",
55 | "activated": true
56 | }
57 | ],
58 | "utterances": [
59 | {
60 | "text": "i need help",
61 | "intent": "Help",
62 | "entities": []
63 | },
64 | {
65 | "text": "help me",
66 | "intent": "Help",
67 | "entities": []
68 | },
69 | {
70 | "text": "tomorrow",
71 | "intent": "dateintent",
72 | "entities": []
73 | },
74 | {
75 | "text": "search for hotels in seattle",
76 | "intent": "SearchHotels",
77 | "entities": []
78 | },
79 | {
80 | "text": "what can i do?",
81 | "intent": "Help",
82 | "entities": []
83 | },
84 | {
85 | "text": "next monday",
86 | "intent": "dateintent",
87 | "entities": []
88 | },
89 | {
90 | "text": "next year",
91 | "intent": "dateintent",
92 | "entities": []
93 | },
94 | {
95 | "text": "look for hotels in miami",
96 | "intent": "SearchHotels",
97 | "entities": []
98 | },
99 | {
100 | "text": "show me hotels in california",
101 | "intent": "SearchHotels",
102 | "entities": []
103 | },
104 | {
105 | "text": "show me the reviews of the amazing bot resort",
106 | "intent": "ShowHotelsReviews",
107 | "entities": [
108 | {
109 | "entity": "Hotel",
110 | "startPos": 23,
111 | "endPos": 44
112 | }
113 | ]
114 | },
115 | {
116 | "text": "can i see the reviews of extended bot hotel?",
117 | "intent": "ShowHotelsReviews",
118 | "entities": [
119 | {
120 | "entity": "Hotel",
121 | "startPos": 25,
122 | "endPos": 42
123 | }
124 | ]
125 | },
126 | {
127 | "text": "find reviews of hotelxya",
128 | "intent": "ShowHotelsReviews",
129 | "entities": [
130 | {
131 | "entity": "Hotel",
132 | "startPos": 16,
133 | "endPos": 23
134 | }
135 | ]
136 | },
137 | {
138 | "text": "show me reviews of the amazing hotel",
139 | "intent": "ShowHotelsReviews",
140 | "entities": [
141 | {
142 | "entity": "Hotel",
143 | "startPos": 19,
144 | "endPos": 35
145 | }
146 | ]
147 | },
148 | {
149 | "text": "what are the available options?",
150 | "intent": "Help",
151 | "entities": []
152 | },
153 | {
154 | "text": "best hotels in seattle",
155 | "intent": "SearchHotels",
156 | "entities": []
157 | },
158 | {
159 | "text": "hotels in los angeles",
160 | "intent": "SearchHotels",
161 | "entities": []
162 | },
163 | {
164 | "text": "can you show me hotels from los angeles?",
165 | "intent": "SearchHotels",
166 | "entities": []
167 | },
168 | {
169 | "text": "can you show me the reviews of the amazing resort & hotel",
170 | "intent": "ShowHotelsReviews",
171 | "entities": [
172 | {
173 | "entity": "Hotel",
174 | "startPos": 31,
175 | "endPos": 56
176 | }
177 | ]
178 | },
179 | {
180 | "text": "what are the reviews of the hotel bot framework?",
181 | "intent": "ShowHotelsReviews",
182 | "entities": [
183 | {
184 | "entity": "Hotel",
185 | "startPos": 24,
186 | "endPos": 46
187 | }
188 | ]
189 | },
190 | {
191 | "text": "find hotels near eze",
192 | "intent": "SearchHotels",
193 | "entities": [
194 | {
195 | "entity": "AirportCode",
196 | "startPos": 17,
197 | "endPos": 19
198 | }
199 | ]
200 | },
201 | {
202 | "text": "where can i stay near nnn?",
203 | "intent": "SearchHotels",
204 | "entities": [
205 | {
206 | "entity": "AirportCode",
207 | "startPos": 22,
208 | "endPos": 24
209 | }
210 | ]
211 | },
212 | {
213 | "text": "show hotels near att airport",
214 | "intent": "SearchHotels",
215 | "entities": [
216 | {
217 | "entity": "AirportCode",
218 | "startPos": 17,
219 | "endPos": 19
220 | }
221 | ]
222 | },
223 | {
224 | "text": "find hotels near agl",
225 | "intent": "SearchHotels",
226 | "entities": [
227 | {
228 | "entity": "AirportCode",
229 | "startPos": 17,
230 | "endPos": 19
231 | }
232 | ]
233 | },
234 | {
235 | "text": "find hotels around eze airport",
236 | "intent": "SearchHotels",
237 | "entities": [
238 | {
239 | "entity": "AirportCode",
240 | "startPos": 19,
241 | "endPos": 21
242 | }
243 | ]
244 | },
245 | {
246 | "text": "01/7",
247 | "intent": "dateintent",
248 | "entities": []
249 | }
250 | ]
251 | }
--------------------------------------------------------------------------------
/samples/language/luis/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | services: cognitive-services, luis, language-understanding
3 | platforms: python
4 | author: cahann, lmazuel
5 | ---
6 |
7 | # Cognitive Services: LUIS Runtime Sample
8 |
9 | An example demonstrating how:
10 | - to consume the LUIS Authoring SDK to build an app or manage it
11 | - to consume the LUIS Runtime SDK to predict user utterances.
12 |
13 | ## Prerequisites
14 |
15 | The minimum prerequisites to run this sample are:
16 | * A [LUIS.ai account](https://www.luis.ai/) in which to upload the sample's LUIS model.
17 |
18 | The first step is to get your Authoring Key. Go to the home page, [www.luis.ai](https://www.luis.ai/), and log in. After you create your LUIS account, a starter key, also known as an authoring key, is created automatically for your account. To find the authoring key, click on the account name in the upper-right navigation bar to open [Account Settings](https://www.luis.ai/user/settings), which displays the Authoring Key.
19 |
20 | 
21 |
22 | Set the `LUIS_SUBSCRIPTION_KEY` environment variable to this authoring key to continue.
23 |
24 | ## Cognitive Services: LUIS Authoring Sample
25 |
26 | We provide two samples:
27 |
28 | - The booking sample, which is a complete example of how to create an app, create intents and utterances, and train and publish the app
29 | - The management sample, which shows management operations: importing or exporting an app/version, listing the different versions of an app, cloning a version, etc.
30 |
31 | ## Cognitive Services: LUIS Runtime Sample
32 |
33 | #### LUIS Application
34 |
35 | The first step to using LUIS is to create or import an application. Go to the home page, [www.luis.ai](https://www.luis.ai/), and log in. After creating your LUIS account you'll be able to Import an Existing Application, where you can select a local copy of the LuisApp.json file and import it.
36 |
37 | 
38 |
39 | If you want to test this sample, you have to import the pre-built [LuisApp.json](LuisApp.json) file into your LUIS account.
40 |
41 | Once you have imported the application you'll need to "train" the model ([Training](https://docs.microsoft.com/en-us/azure/cognitive-services/luis/train-test)) before you can "Publish" the model to an HTTP endpoint. For more information, take a look at [Publishing a Model](https://docs.microsoft.com/en-us/azure/cognitive-services/luis/publishapp).
42 |
43 | Finally, edit the [luis_runtime_samples.py](luis_runtime_samples.py) file and update the attribute placeholders with the values corresponding to your Application and Endpoint where the application was deployed.
44 |
45 | #### Where to find the Application ID and Subscription Key
46 |
47 | You'll need these two values to configure the LuisDialog through the LuisModel attribute:
48 |
49 | 1. Application ID
50 |
51 | You can find the App ID in the LUIS application's settings.
52 |
53 | 
54 |
55 | 2. Subscription Key and Endpoint
56 |
57 | Click on the Publish App link from the top of the LUIS application dashboard. Once your app is published, copy the Endpoint and Key String from *Starter_Key* from the Endpoints table on the Publish App page.
58 |
59 | 
60 |
61 |
62 | ### Code Highlights
63 |
64 | One of the key problems in human-computer interactions is the ability of the computer to understand what a person wants, and to find the pieces of information that are relevant to their intent. In the LUIS application, you will bundle together the intents and entities that are important to your task. Read more about [Planning an Application](https://docs.microsoft.com/en-us/azure/cognitive-services/luis/plan-your-app) in the [LUIS Docs](https://docs.microsoft.com/en-us/azure/cognitive-services/luis/).
65 |
66 | Once your model is set, you can invoke the LUIS Runtime API to analyze user input and obtain its intent and possible entities.
67 |
68 | From Python, use the [azure-cognitiveservices-language-luis](http://pypi.python.org/pypi/azure-cognitiveservices-language-luis) package.
69 |
70 | ````python
71 | from azure.cognitiveservices.language.luis.runtime import LUISRuntimeClient
72 | from msrest.authentication import CognitiveServicesCredentials
73 |
74 | # Create client with SubscriptionKey and Endpoint
75 | client = LUISRuntimeClient(
76 | 'https://westus.api.cognitive.microsoft.com', # Change "westus" to your region if necessary
77 | CognitiveServicesCredentials("[LUIS_SUBSCRIPTION_KEY]"), # Put your LUIS Subscription key
78 | )
79 |
80 | # Predict
81 | luis_result = client.prediction.resolve(
82 | "[LUIS_APPLICATION_ID]", # Put your LUIS Application ID
83 | "Text to Predict or User input"
84 | )
85 | ````
86 |
87 | The LuisResult object contains the possible detected intents and entities that could be extracted from the input.
88 |
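89 | For instance, a short sketch of reading the result (this assumes the standard `LuisResult` attributes from the runtime models; the actual intents and entities depend on your application):
90 | 
91 | ````python
92 | print("Top intent: {}".format(luis_result.top_scoring_intent.intent))
93 | for entity in luis_result.entities:
94 |     print("Entity '{}' of type '{}'".format(entity.entity, entity.type))
95 | ````
96 | 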
89 | ### Outcome
90 |
91 | You will see the following when running the application:
92 |
93 | 
94 |
95 | ### More Information
96 |
97 | To get more information about how to get started with LUIS and the Python SDK, please review the following resources:
98 | * [Language Understanding Intelligent Service](https://azure.microsoft.com/en-us/services/cognitive-services/language-understanding-intelligent-service/)
99 | * [LUIS.ai](https://www.luis.ai)
100 | * [LUIS Docs](https://docs.microsoft.com/en-us/azure/cognitive-services/luis/home)
101 | * [LUIS Runtime API v2 - Specification](https://github.com/Azure/azure-rest-api-specs/tree/master/specification/cognitiveservices/data-plane/LUIS/Runtime)
102 |
--------------------------------------------------------------------------------
/samples/language/luis/images/outcome.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/language/luis/images/outcome.png
--------------------------------------------------------------------------------
/samples/language/luis/images/prereqs-apikey.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/language/luis/images/prereqs-apikey.png
--------------------------------------------------------------------------------
/samples/language/luis/images/prereqs-appid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/language/luis/images/prereqs-appid.png
--------------------------------------------------------------------------------
/samples/language/luis/images/prereqs-import.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/language/luis/images/prereqs-import.png
--------------------------------------------------------------------------------
/samples/language/luis/images/programmatic-key.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/language/luis/images/programmatic-key.png
--------------------------------------------------------------------------------
/samples/language/luis/luis_authoring_samples.py:
--------------------------------------------------------------------------------
1 | import json
2 | import time
3 | import datetime
4 | from pprint import pprint
5 |
6 | from azure.cognitiveservices.language.luis.authoring import LUISAuthoringClient
7 |
8 | from msrest.authentication import CognitiveServicesCredentials
9 |
10 | SUBSCRIPTION_KEY_ENV_NAME = "LUIS_SUBSCRIPTION_KEY"
11 |
12 |
13 | def booking_app(subscription_key):
14 | """Authoring.
15 |
16 | This will create a LUIS Booking application, train and publish it.
17 | """
18 | client = LUISAuthoringClient(
19 | 'https://westus.api.cognitive.microsoft.com',
20 | CognitiveServicesCredentials(subscription_key),
21 | )
22 |
23 | try:
24 | # Create a LUIS app
25 | default_app_name = "Contoso-{}".format(datetime.datetime.now())
26 | version_id = "0.1"
27 |
28 | print("Creating App {}, version {}".format(
29 | default_app_name, version_id))
30 |
31 | app_id = client.apps.add({
32 | 'name': default_app_name,
33 | 'initial_version_id': version_id,
34 | 'description': "New App created with LUIS Python sample",
35 | 'culture': 'en-us',
36 | })
37 | print("Created app {}".format(app_id))
38 |
39 | # Add information into the model
40 |
41 | print("\nWe'll create two new entities.")
42 | print("The \"Destination\" simple entity will hold the flight destination.")
43 | print("The \"Class\" hierarchical entity will accept \"First\", \"Business\" and \"Economy\" values.")
44 |
45 | destination_name = "Destination"
46 | destination_id = client.model.add_entity(
47 | app_id,
48 | version_id,
49 | destination_name
50 | )
51 | print("{} simple entity created with id {}".format(
52 | destination_name,
53 | destination_id
54 | ))
55 |
56 | class_name = "Class"
57 | class_id = client.model.add_hierarchical_entity(
58 | app_id,
59 | version_id,
60 | name=class_name,
61 | children=["First", "Business", "Economy"]
62 | )
63 | print("{} hierarchical entity created with id {}".format(
64 | class_name,
65 | class_id
66 | ))
67 |
68 | print("\nWe'll now create the \"Flight\" composite entity including \"Class\" and \"Destination\".")
69 |
70 | flight_name = "Flight"
71 | flight_id = client.model.add_composite_entity(
72 | app_id,
73 | version_id,
74 | name=flight_name,
75 | children=[class_name, destination_name]
76 | )
77 | print("{} composite entity created with id {}".format(
78 | flight_name,
79 | flight_id
80 | ))
81 |
82 | find_economy_to_madrid = "find flights in economy to Madrid"
83 | find_first_to_london = "find flights to London in first class"
84 |
85 | print("\nWe'll create a new \"FindFlights\" intent including the following utterances:")
86 | print(" - "+find_economy_to_madrid)
87 | print(" - "+find_first_to_london)
88 |
89 | intent_name = "FindFlights"
90 | intent_id = client.model.add_intent(
91 | app_id,
92 | version_id,
93 | intent_name
94 | )
95 | print("{} intent created with id {}".format(
96 | intent_name,
97 | intent_id
98 | ))
99 |
100 | def get_example_label(utterance, entity_name, value):
101 | """Build a EntityLabelObject.
102 |
103 | This will find the "value" start/end index in "utterance", and assign it to "entity name"
104 | """
105 | utterance = utterance.lower()
106 | value = value.lower()
107 | return {
108 | 'entity_name': entity_name,
109 | 'start_char_index': utterance.find(value),
110 | 'end_char_index': utterance.find(value) + len(value)
111 | }
112 |
113 | utterances = [{
114 | 'text': find_economy_to_madrid,
115 | 'intent_name': intent_name,
116 | 'entity_labels': [
117 | get_example_label(find_economy_to_madrid,
118 | "Flight", "economy to madrid"),
119 | get_example_label(find_economy_to_madrid,
120 | "Destination", "Madrid"),
121 | get_example_label(find_economy_to_madrid, "Class", "economy"),
122 | ]
123 | }, {
124 | 'text': find_first_to_london,
125 | 'intent_name': intent_name,
126 | 'entity_labels': [
127 | get_example_label(find_first_to_london,
128 | "Flight", "London in first class"),
129 | get_example_label(find_first_to_london,
130 | "Destination", "London"),
131 | get_example_label(find_first_to_london, "Class", "first"),
132 | ]
133 | }]
134 | utterances_result = client.examples.batch(
135 | app_id,
136 | version_id,
137 | utterances
138 | )
139 |
140 | print("\nUtterances added to the {} intent".format(intent_name))
141 |
142 | # Training the model
143 | print("\nWe'll start training your app...")
144 |
145 | async_training = client.train.train_version(app_id, version_id)
146 | is_trained = async_training.status == "UpToDate"
147 |
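148 | # Poll each model's training status until all report "UpToDate" or "Success"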
148 | trained_status = ["UpToDate", "Success"]
149 | while not is_trained:
150 | time.sleep(1)
151 | status = client.train.get_status(app_id, version_id)
152 | is_trained = all(
153 | m.details.status in trained_status for m in status)
154 |
155 | print("Your app is trained. You can now go to the LUIS portal and test it!")
156 |
157 | # Publish the app
158 | print("\nWe'll start publishing your app...")
159 |
160 | publish_result = client.apps.publish(
161 | app_id,
162 | {
163 | 'version_id': version_id,
164 | 'is_staging': False,
165 | 'region': 'westus'
166 | }
167 | )
168 | endpoint = publish_result.endpoint_url + \
169 | "?subscription-key=" + subscription_key + "&q="
170 | print("Your app is published. You can now go to test it on\n{}".format(endpoint))
171 |
172 | except Exception as err:
173 | print("Encountered exception. {}".format(err))
174 |
175 |
176 | def management(subscription_key):
177 | """Managing
178 |
179 | This will show how to manage your LUIS applications.
180 | """
181 | client = LUISAuthoringClient(
182 | 'https://westus.api.cognitive.microsoft.com',
183 | CognitiveServicesCredentials(subscription_key),
184 | )
185 |
186 | try:
187 | # Create a LUIS app
188 | default_app_name = "Contoso-{}".format(datetime.datetime.now())
189 | version_id = "0.1"
190 |
191 | print("Creating App {}, version {}".format(
192 | default_app_name, version_id))
193 |
194 | app_id = client.apps.add({
195 | 'name': default_app_name,
196 | 'initial_version_id': version_id,
197 | 'description': "New App created with LUIS Python sample",
198 | 'culture': 'en-us',
199 | })
200 | print("Created app {}".format(app_id))
201 |
202 | # Listing app
203 | print("\nList all apps")
204 | for app in client.apps.list():
205 | print("\t->App: '{}'".format(app.name))
206 |
207 | # Cloning a version
208 | print("\nCloning version 0.1 into 0.2")
209 | client.versions.clone(
210 | app_id,
211 | "0.1", # Source
212 | "0.2" # New version name
213 | )
214 | print("Your app version has been cloned.")
215 |
216 | # Export the version
217 | print("\nExport version 0.2 as JSON")
218 | luis_app = client.versions.export(
219 | app_id,
220 | "0.2"
221 | )
222 | luis_app_as_json = json.dumps(luis_app.serialize())
223 | # You can now save this JSON string as a file
224 |
225 | # Import the version
226 | print("\nImport previously exported version as 0.3")
227 | # luis_app_as_json (from the export above) is re-imported as a new version
228 | client.versions.import_method(
229 | app_id,
230 | json.loads(luis_app_as_json),
231 | "0.3"
232 | )
233 |
234 | # Listing versions
235 | print("\nList all versions in this app")
236 | for version in client.versions.list(app_id):
237 | print("\t->Version: '{}', training status: {}".format(version.version,
238 | version.training_status))
239 |
240 | # Print app details
241 | print("\nPrint app '{}' details".format(default_app_name))
242 | details = client.apps.get(app_id)
243 | # as_dict "dictify" the object, by default it's attribute based. e.g. details.name
244 | pprint(details.as_dict())
245 |
246 | # Print version details
247 | print("\nPrint version '{}' details".format(version_id))
248 | details = client.versions.get(app_id, version_id)
249 | # as_dict "dictify" the object, by default it's attribute based. e.g. details.name
250 | pprint(details.as_dict())
251 |
252 | # Delete an app
253 | print("\nDelete app '{}'".format(default_app_name))
254 | client.apps.delete(app_id)
255 | print("App deleted!")
256 |
257 | except Exception as err:
258 | print("Encountered exception. {}".format(err))
259 |
260 |
261 | if __name__ == "__main__":
262 | import sys
263 | import os.path
264 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
265 | from tools import execute_samples
266 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
267 |
--------------------------------------------------------------------------------
/samples/language/luis/luis_runtime_samples.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os.path
3 | from pprint import pprint
4 |
5 | from azure.cognitiveservices.language.luis.runtime import LUISRuntimeClient
6 |
7 | from msrest.authentication import CognitiveServicesCredentials
8 |
9 | SUBSCRIPTION_KEY_ENV_NAME = "LUIS_SUBSCRIPTION_KEY"
10 |
11 | CWD = os.path.dirname(__file__)
12 |
13 |
14 | def runtime(subscription_key):
15 | """Resolve.
16 |
17 | This will execute a LUIS prediction.
18 | """
19 | client = LUISRuntimeClient(
20 | 'https://westus.api.cognitive.microsoft.com',
21 | CognitiveServicesCredentials(subscription_key),
22 | )
23 |
24 | try:
25 | query = "Look for hotels near LAX airport"
26 | print("Executing query: {}".format(query))
27 | result = client.prediction.resolve(
28 | "bce13896-4de3-4783-9696-737d8fde8cd1", # LUIS Application ID
29 | query
30 | )
31 |
32 | print("\nDetected intent: {} (score: {:d}%)".format(
33 | result.top_scoring_intent.intent,
34 | int(result.top_scoring_intent.score*100)
35 | ))
36 | print("Detected entities:")
37 | for entity in result.entities:
38 | print("\t-> Entity '{}' (type: {}, score:{:d}%)".format(
39 | entity.entity,
40 | entity.type,
41 | int(entity.additional_properties['score']*100)
42 | ))
43 | print("\nComplete result object as dictionnary")
44 | pprint(result.as_dict())
45 |
46 | except Exception as err:
47 | print("Encountered exception. {}".format(err))
48 |
49 |
50 | if __name__ == "__main__":
51 | import sys
52 | import os.path
53 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
54 | from tools import execute_samples
55 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
56 |
--------------------------------------------------------------------------------
/samples/language/spellcheck_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from azure.cognitiveservices.language.spellcheck import SpellCheckAPI
4 | from msrest.authentication import CognitiveServicesCredentials
5 |
6 | # Add your Bing Spell Check subscription key to your environment variables.
7 | SUBSCRIPTION_KEY = os.environ['BING_SPELL_CHECK_SUBSCRIPTION_KEY']
8 | def spellcheck(subscription_key):
9 | """SpellCheck.
10 |
11 | This will do a search for misspelled query and parse the response.
12 | """
13 | client = SpellCheckAPI(CognitiveServicesCredentials(subscription_key))
14 |
15 | try:
16 | result = client.spell_checker("Bill Gatas", mode="proof")
17 | print("Correction for Query# \"bill gatas\"")
18 |
19 | if result.flagged_tokens:
20 | first_spellcheck_result = result.flagged_tokens[0]
21 |
22 | print("SpellCheck result count: {}".format(
23 | len(result.flagged_tokens)))
24 | print("First SpellCheck token: {}".format(
25 | first_spellcheck_result.token))
26 | print("First SpellCheck type: {}".format(
27 | first_spellcheck_result.type))
28 | print("First SpellCheck suggestion count: {}".format(
29 | len(first_spellcheck_result.suggestions)))
30 |
31 | if first_spellcheck_result.suggestions:
32 | first_suggestion = first_spellcheck_result.suggestions[0]
33 | print("First SpellCheck suggestion score: {}".format(
34 | first_suggestion.score))
35 | print("First SpellCheck suggestion: {}".format(
36 | first_suggestion.suggestion))
37 | else:
38 | print("Couldn't get any Spell check results!")
39 |
40 | else:
41 | print("Didn't see any SpellCheck results..")
42 |
43 | except Exception as err:
44 | print("Encountered exception. {}".format(err))
45 |
46 |
47 | if __name__ == "__main__":
48 | import sys, os.path
49 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
50 | from samples.tools import execute_samples
51 | execute_samples(globals(), 'BING_SPELL_CHECK_SUBSCRIPTION_KEY')
52 |
--------------------------------------------------------------------------------
/samples/search/TestImages/image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/search/TestImages/image.jpg
--------------------------------------------------------------------------------
/samples/search/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/search/__init__.py
--------------------------------------------------------------------------------
/samples/search/autosuggest_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from azure.cognitiveservices.search.autosuggest import AutoSuggestClient
4 | from azure.cognitiveservices.search.autosuggest.models import (
5 | Suggestions,
6 | SuggestionsSuggestionGroup,
7 | SearchAction,
8 | ErrorResponseException
9 | )
10 | from msrest.authentication import CognitiveServicesCredentials
11 |
12 | # Add your Bing Autosuggest subscription key to your environment variables.
13 | SUBSCRIPTION_KEY = os.environ['BING_AUTOSUGGEST_SUBSCRIPTION_KEY']
14 | ENDPOINT = os.environ['BING_AUTOSUGGEST_ENDPOINT']
15 |
16 | def autosuggest_lookup(subscription_key):
17 | """AutoSuggestLookup.
18 |
19 | This will look up a single query (Satya Nadella) and print out the returned suggestions.
20 | """
21 | client = AutoSuggestClient(
22 | endpoint=ENDPOINT,
23 | credentials=CognitiveServicesCredentials(subscription_key)
24 | )
25 |
26 | try:
27 | suggestions = client.auto_suggest(
28 | query="Satya Nadella") # type: Suggestions
29 |
30 | if suggestions.suggestion_groups:
31 | print("Searched for \"Satya Nadella\" and found suggestions:")
32 | suggestion_group = suggestions.suggestion_groups[0] # type: SuggestionsSuggestionGroup
33 | for suggestion in suggestion_group.search_suggestions: # type: SearchAction
34 | print("....................................")
35 | print(suggestion.query)
36 | print(suggestion.display_text)
37 | print(suggestion.url)
38 | print(suggestion.search_kind)
39 | else:
40 | print("Didn't see any suggestion..")
41 |
42 | except Exception as err:
43 | print("Encountered exception. {}".format(err))
44 |
45 |
46 | def error(subscription_key):
47 | """Error.
48 |
49 | This triggers a bad request and shows how to read the error response.
50 | """
51 |
52 | # Breaking the subscription key on purpose
53 | client = AutoSuggestClient(
54 | endpoint=ENDPOINT,
55 | credentials=CognitiveServicesCredentials(subscription_key+"1")
56 | )
57 |
58 | try:
59 | suggestions = client.auto_suggest(
60 | query="Satya Nadella", market="no-ty")
61 | except ErrorResponseException as err:
62 | # The status code of the error should be a good indication of what occurred. However, if you'd like more details, you can dig into the response.
63 | # Please note that depending on the type of error, the response schema might be different, so you aren't guaranteed a specific error response schema.
64 |
65 | print("Exception occurred, status code {} with reason {}.\n".format(
66 | err.response.status_code, err))
67 |
68 | # if you'd like more descriptive information (if available)
69 | if err.error.errors:
70 | print("This is the errors I have:")
71 | for error in err.error.errors:
72 | print("Parameter \"{}\" has an invalid value \"{}\". SubCode is \"{}\". Detailed message is \"{}\"".format(
73 | error.parameter, error.value, error.sub_code, error.message))
74 | else:
75 | print("There was no details on the error.")
76 |
77 |
78 | if __name__ == "__main__":
79 | import sys
80 | import os.path
81 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..")))
82 | from tools import execute_samples
83 | execute_samples(globals(), 'BING_AUTOSUGGEST_SUBSCRIPTION_KEY')
84 |
--------------------------------------------------------------------------------
/samples/search/custom_image_search_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from azure.cognitiveservices.search.customimagesearch import CustomImageSearchClient
4 | from msrest.authentication import CognitiveServicesCredentials
5 |
6 | # Add your Bing Custom Search subscription key to your environment variables.
7 | SUBSCRIPTION_KEY = os.environ['BING_CUSTOM_SEARCH_SUBSCRIPTION_KEY']
8 | ENDPOINT = os.environ['BING_CUSTOM_SEARCH_ENDPOINT']
9 |
10 | def custom_image_search_result_lookup(subscription_key):
11 | """CustomImageSearchResultLookup.
12 |
13 | This will look up a single query (Xbox) and print out the number of results, insights token, thumbnail url and content url for the first image result.
14 | """
15 |
16 | client = CustomImageSearchClient(
17 | endpoint=ENDPOINT,
18 | credentials=CognitiveServicesCredentials(subscription_key))
19 | try:
20 | image_results = client.custom_instance.image_search(
21 | query="Xbox", custom_config=1)
22 | print("Searched for Query \" Xbox \"")
23 |
24 | # WebPages
25 | if image_results.value:
26 | # find the first web page
27 | first_image_result = image_results.value[0]
28 |
29 | if first_image_result:
30 | print("Image result count: {}".format(len(image_results.value)))
31 | print("First image insights token: {}".format(
32 | first_image_result.image_insights_token))
33 | print("First image thumbnail url: {}".format(
34 | first_image_result.thumbnail_url))
35 | print("First image content url: {}".format(
36 | first_image_result.content_url))
37 | else:
38 | print("Couldn't find image results!")
39 | else:
40 | print("Couldn't find image results!")
41 | except Exception as e:
42 | print("encountered exception. " + str(e))
43 |
44 |
45 | if __name__ == "__main__":
46 | import sys
47 | import os.path
48 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..")))
49 | from tools import execute_samples
50 | execute_samples(globals(), 'BING_CUSTOM_SEARCH_SUBSCRIPTION_KEY')
51 |
--------------------------------------------------------------------------------
/samples/search/custom_search_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from azure.cognitiveservices.search.customsearch import CustomSearchClient
4 | from msrest.authentication import CognitiveServicesCredentials
5 |
6 | SUBSCRIPTION_KEY = os.environ['BING_CUSTOM_SEARCH_SUBSCRIPTION_KEY']
7 | ENDPOINT = os.environ['BING_CUSTOM_SEARCH_ENDPOINT']
8 |
9 | def custom_search_web_page_result_lookup(subscription_key):
10 | """CustomSearch.
11 |
12 | This will look up a single query (Xbox) and print out name and url for first web result.
13 | """
14 |
15 | client = CustomSearchClient(
16 | endpoint=ENDPOINT,
17 | credentials=CognitiveServicesCredentials(subscription_key))
18 |
19 | try:
20 | web_data = client.custom_instance.search(query="xbox", custom_config=1)
21 | print("Searched for Query 'xbox'")
22 |
23 | if web_data.web_pages.value:
24 | first_web_result = web_data.web_pages.value[0]
25 | print("Web Pages result count: {}".format(
26 | len(web_data.web_pages.value)))
27 | print("First Web Page name: {}".format(first_web_result.name))
28 | print("First Web Page url: {}".format(first_web_result.url))
29 | else:
30 | print("Didn't see any web data..")
31 |
32 | except Exception as err:
33 | print("Encountered exception. {}".format(err))
34 |
35 |
36 | if __name__ == "__main__":
37 | import sys, os.path
38 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
39 | from samples.tools import execute_samples
40 | execute_samples(globals(), 'BING_CUSTOM_SEARCH_SUBSCRIPTION_KEY')
41 |
--------------------------------------------------------------------------------
/samples/search/entity_search_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from azure.cognitiveservices.search.entitysearch import EntitySearchClient
4 | from azure.cognitiveservices.search.entitysearch.models import Place, ErrorResponseException
5 | from msrest.authentication import CognitiveServicesCredentials
6 |
7 | # Add your Bing Entity Search subscription key to your environment variables.
8 | SUBSCRIPTION_KEY = os.environ['BING_ENTITY_SEARCH_SUBSCRIPTION_KEY']
9 | ENDPOINT = os.environ['BING_ENTITY_SEARCH_ENDPOINT']
10 |
11 | def dominant_entity_lookup(subscription_key):
12 | """DominantEntityLookup.
13 |
14 | This will look up a single entity (Satya Nadella) and print out a short description about them.
15 | """
16 | client = EntitySearchClient(
17 | endpoint=ENDPOINT,
18 | credentials=CognitiveServicesCredentials(subscription_key)
19 | )
20 |
21 | try:
22 | entity_data = client.entities.search(query="satya nadella")
23 |
24 | if entity_data.entities.value:
25 | # find the entity that represents the dominant one
26 |
27 | main_entities = [entity for entity in entity_data.entities.value
28 | if entity.entity_presentation_info.entity_scenario == "DominantEntity"]
29 |
30 | if main_entities:
31 | print(
32 | 'Searched for "Satya Nadella" and found a dominant entity with this description:')
33 | print(main_entities[0].description)
34 | else:
35 | print("Couldn't find main entity Satya Nadella!")
36 |
37 | else:
38 | print("Didn't see any data..")
39 |
40 | except Exception as err:
41 | print("Encountered exception. {}".format(err))
42 |
43 |
44 | def handling_disambiguation(subscription_key):
45 | """HandlingDisambiguation.
46 |
47 | "This will handle disambiguation results for an ambiguous query (William Gates)".
48 | """
49 | client = EntitySearchClient(
50 | endpoint=ENDPOINT,
51 | credentials=CognitiveServicesCredentials(subscription_key)
52 | )
53 |
54 | try:
55 | entity_data = client.entities.search(query="william gates")
56 |
57 | if entity_data.entities.value:
58 | # find the entity that represents the dominant one
59 |
60 | main_entities = [entity for entity in entity_data.entities.value
61 | if entity.entity_presentation_info.entity_scenario == "DominantEntity"]
62 |
63 | disambig_entities = [entity for entity in entity_data.entities.value
64 | if entity.entity_presentation_info.entity_scenario == "DisambiguationItem"]
65 |
66 | if main_entities:
67 | main_entity = main_entities[0]
68 | type_hint = main_entity.entity_presentation_info.entity_type_display_hint
69 |
70 | print('Searched for "William Gates" and found a dominant entity {}with this description:'.format(
71 | 'with type hint "{}" '.format(type_hint) if type_hint else ''))
72 | print(main_entity.description)
73 | else:
74 | print("Couldn't find a reliable dominant entity for William Gates!")
75 |
76 | if disambig_entities:
77 | print(
78 | "\nThis query is pretty ambiguous and can be referring to multiple things. Did you mean one of these:")
79 | suggestions = []
80 | for disambig_entity in disambig_entities:
81 | suggestions.append("{} the {}".format(
82 | disambig_entity.name, disambig_entity.entity_presentation_info.entity_type_display_hint))
83 | print(", or ".join(suggestions))
84 | else:
85 | print(
86 | "We didn't find any disambiguation items for William Gates, so we must be certain what you're talking about!")
87 |
88 | else:
89 | print("Didn't see any data..")
90 |
91 | except Exception as err:
92 | print("Encountered exception. {}".format(err))
93 |
94 |
95 | def restaurant_lookup(subscription_key):
96 | """RestaurantLookup.
97 |
98 | This will look up a single restaurant (john howie bellevue) and print out its phone number.
99 | """
100 | client = EntitySearchClient(
101 | endpoint=ENDPOINT,
102 | credentials=CognitiveServicesCredentials(subscription_key)
103 | )
104 |
105 | try:
106 | entity_data = client.entities.search(query="john howie bellevue")
107 |
108 | if entity_data.places.value:
109 |
110 | restaurant = entity_data.places.value[0]
111 |
112 | # Some local entities will be places, others won't be. Depending on what class contains the data you want, you can check
113 | # using isinstance one of the class, or try to get the attribute and handle the exception (EAFP principle).
114 | # The recommended Python way is usually EAFP (see https://docs.python.org/3/glossary.html)
115 | # In this case, the item being returned is technically a Restaurant, but the Place schema has the data we want (telephone)
116 |
117 | # Pythonic approach : EAFP "Easier to ask for forgiveness than permission"
118 | try:
119 | telephone = restaurant.telephone
120 | print(
121 | 'Searched for "John Howie Bellevue" and found a restaurant with this phone number:')
122 | print(telephone)
123 | except AttributeError:
124 | print("Couldn't find a place!")
125 |
126 | # More cross language approach
127 | if isinstance(restaurant, Place):
128 | print(
129 | 'Searched for "John Howie Bellevue" and found a restaurant with this phone number:')
130 | print(restaurant.telephone)
131 | else:
132 | print("Couldn't find a place!")
133 |
134 | else:
135 | print("Didn't see any data..")
136 |
137 | except Exception as err:
138 | print("Encountered exception. {}".format(err))
139 |
140 |
141 | def multiple_restaurant_lookup(subscription_key):
142 | """MultipleRestaurantLookup.
143 |
144 | This will look up a list of restaurants (seattle restaurants) and present their names and phone numbers.
145 | """
146 |
147 | client = EntitySearchClient(
148 | endpoint=ENDPOINT,
149 | credentials=CognitiveServicesCredentials(subscription_key)
150 | )
151 |
152 | try:
153 | restaurants = client.entities.search(query="seattle restaurants")
154 |
155 | if restaurants.places.value:
156 |
157 | # get all the list items that relate to this query
158 | list_items = [entity for entity in restaurants.places.value
159 | if entity.entity_presentation_info.entity_scenario == "ListItem"]
160 |
161 | if list_items:
162 |
163 | suggestions = []
164 | for place in list_items:
165 | # Pythonic approach : EAFP "Easier to ask for forgiveness than permission"
166 | # see https://docs.python.org/3/glossary.html
167 | try:
168 | suggestions.append("{} ({})".format(
169 | place.name, place.telephone))
170 | except AttributeError:
171 | print(
172 | "Unexpectedly found something that isn\'t a place named '{}'", place.name)
173 |
174 | print("Ok, we found these places: ")
175 | print(", ".join(suggestions))
176 |
177 | else:
178 | print("Couldn't find any relevant results for \"seattle restaurants\"")
179 |
180 | else:
181 | print("Didn't see any data..")
182 |
183 | except Exception as err:
184 | print("Encountered exception. {}".format(err))
185 |
186 |
187 | def error(subscription_key):
188 | """Error.
189 |
190 | This triggers a bad request and shows how to read the error response.
191 | """
192 |
193 | client = EntitySearchClient(
194 | endpoint=ENDPOINT,
195 | credentials=CognitiveServicesCredentials(subscription_key)
196 | )
197 |
198 | try:
199 | entity_data = client.entities.search(
200 | query="tom cruise", market="no-ty")
201 | except ErrorResponseException as err:
202 | # The status code of the error should be a good indication of what occurred. However, if you'd like more details, you can dig into the response.
203 | # Please note that depending on the type of error, the response schema might be different, so you aren't guaranteed a specific error response schema.
204 |
205 | print("Exception occurred, status code {} with reason {}.\n".format(
206 | err.response.status_code, err))
207 |
208 | # if you'd like more descriptive information (if available)
209 | if err.error.errors:
210 | print("This is the errors I have:")
211 | for error in err.error.errors:
212 | print("Parameter \"{}\" has an invalid value \"{}\". SubCode is \"{}\". Detailed message is \"{}\"".format(
213 | error.parameter, error.value, error.sub_code, error.message))
214 | else:
215 | print("There was no details on the error.")
216 |
217 |
218 | if __name__ == "__main__":
219 | import sys, os.path
220 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
221 | from samples.tools import execute_samples
222 | execute_samples(globals(), 'BING_ENTITY_SEARCH_SUBSCRIPTION_KEY')
223 |
--------------------------------------------------------------------------------
/samples/search/image-search-quickstart.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from azure.cognitiveservices.search.imagesearch import ImageSearchClient
4 | from msrest.authentication import CognitiveServicesCredentials
5 |
6 | # Add your Bing Search V7 subscription key and endpoint to your environment variables.
7 | SUBSCRIPTION_KEY = os.environ['BING_SEARCH_V7_SUBSCRIPTION_KEY']
8 | ENDPOINT = os.environ['BING_SEARCH_V7_ENDPOINT']
9 |
10 | search_term = "canadian rockies"
11 |
12 | """
13 | This application will search images on the web with the Bing Image Search API and print out first image result.
14 | """
15 | # create the image search client
16 | client = ImageSearchClient(ENDPOINT, CognitiveServicesCredentials(SUBSCRIPTION_KEY))
17 | # send a search query to the Bing Image Search API
18 | image_results = client.images.search(query=search_term)
19 | print("Searching the web for images of: {}".format(search_term))
20 |
21 | # Image results
22 | if image_results.value:
23 | first_image_result = image_results.value[0]
24 | print("Total number of images returned: {}".format(len(image_results.value)))
25 | print("First image thumbnail url: {}".format(
26 | first_image_result.thumbnail_url))
27 | print("First image content url: {}".format(first_image_result.content_url))
28 | else:
29 | print("Couldn't find image results!")
30 |
--------------------------------------------------------------------------------
/samples/search/news_search_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from azure.cognitiveservices.search.newssearch import NewsSearchClient
4 | from msrest.authentication import CognitiveServicesCredentials
5 |
6 | # Add your Bing Search V7 subscription key to your environment variables.
7 | SUBSCRIPTION_KEY = os.environ['BING_SEARCH_V7_SUBSCRIPTION_KEY']
8 | # Note: your endpoint should not include the /bing/v7.0 suffix
9 | ENDPOINT = os.environ['BING_SEARCH_V7_ENDPOINT']
10 |
11 | def news_search(subscription_key):
12 | """NewsSearch.
13 |
14 | This will search news for (Quantum Computing) with market and count parameters, then verify the number of results and print out the totalEstimatedMatches, name, url, description, published time and provider name of the first news result.
15 | """
16 | client = NewsSearchClient(
17 | endpoint=ENDPOINT,
18 | credentials=CognitiveServicesCredentials(subscription_key)
19 | )
20 |
21 | try:
22 | news_result = client.news.search(
23 | query="Quantum Computing", market="en-us", count=10)
24 | print("Search news for query \"Quantum Computing\" with market and count")
25 |
26 | if news_result.value:
27 | first_news_result = news_result.value[0]
28 | print("Total estimated matches value: {}".format(
29 | news_result.total_estimated_matches))
30 | print("News result count: {}".format(len(news_result.value)))
31 | print("First news name: {}".format(first_news_result.name))
32 | print("First news url: {}".format(first_news_result.url))
33 | print("First news description: {}".format(
34 | first_news_result.description))
35 | print("First published time: {}".format(
36 | first_news_result.date_published))
37 | print("First news provider: {}".format(
38 | first_news_result.provider[0].name))
39 | else:
40 | print("Didn't see any news result data..")
41 |
42 | except Exception as err:
43 | print("Encountered exception. {}".format(err))
44 |
45 |
46 | def news_search_with_filtering(subscription_key):
47 | """NewsSearchWithFilters.
48 |
49 | This will search the most recent news for (Artificial Intelligence) with freshness and sortBy parameters, then verify the number of results and print out the totalEstimatedMatches, name, url, description, published time and provider name of the first news result.
50 | """
51 | client = NewsSearchClient(
52 | endpoint=ENDPOINT,
53 | credentials=CognitiveServicesCredentials(subscription_key)
54 | )
55 |
56 | try:
57 | news_result = client.news.search(
58 | query="Artificial Intelligence",
59 | market="en-us",
60 | freshness="Week",
61 | sort_by="Date"
62 | )
63 | print("Search most recent news for query \"Artificial Intelligence\" with freshness and sortBy")
64 |
65 | if news_result.value:
66 | first_news_result = news_result.value[0]
67 | print("News result count: {}".format(len(news_result.value)))
68 | print("First news name: {}".format(first_news_result.name))
69 | print("First news url: {}".format(first_news_result.url))
70 | print("First news description: {}".format(
71 | first_news_result.description))
72 | print("First published time: {}".format(
73 | first_news_result.date_published))
74 | print("First news provider: {}".format(
75 | first_news_result.provider[0].name))
76 | else:
77 | print("Didn't see any news result data..")
78 |
79 | except Exception as err:
80 | print("Encountered exception. {}".format(err))
81 |
82 |
83 | def news_category(subscription_key):
84 | """NewsCategory.
85 |
86 | This will search category news for movie and TV entertainment with safe search, then verify the number of results and print out the category, name, url, description, published time and provider name of the first news result.
87 | """
88 | client = NewsSearchClient(
89 | endpoint=ENDPOINT,
90 | credentials=CognitiveServicesCredentials(subscription_key)
91 | )
92 |
93 | try:
94 | news_result = client.news.category(
95 | category="Entertainment_MovieAndTV",
96 | market="en-us",
97 | safe_search="strict"
98 | )
99 | print("Search category news for movie and TV entertainment with safe search")
100 |
101 | if news_result.value:
102 | first_news_result = news_result.value[0]
103 | print("News result count: {}".format(len(news_result.value)))
104 | print("First news category: {}".format(first_news_result.category))
105 | print("First news name: {}".format(first_news_result.name))
106 | print("First news url: {}".format(first_news_result.url))
107 | print("First news description: {}".format(
108 | first_news_result.description))
109 | print("First published time: {}".format(
110 | first_news_result.date_published))
111 | print("First news provider: {}".format(
112 | first_news_result.provider[0].name))
113 | else:
114 | print("Didn't see any news result data..")
115 |
116 | except Exception as err:
117 | print("Encountered exception. {}".format(err))
118 |
119 |
120 | def news_trending(subscription_key):
121 | """NewsTrending.
122 |
123 | This will search trending news topics in Bing, then verify the number of results and print out the name, query text, webSearchUrl, newsSearchUrl and image url of the first news result.
124 | """
125 | client = NewsSearchClient(
126 | endpoint=ENDPOINT,
127 | credentials=CognitiveServicesCredentials(subscription_key)
128 | )
129 |
130 | try:
131 | trending_topics = client.news.trending(market="en-us")
132 | print("Search news trending topics in Bing")
133 |
134 | if trending_topics.value:
135 | first_topic = trending_topics.value[0]
136 | print("News result count: {}".format(len(trending_topics.value)))
137 | print("First topic name: {}".format(first_topic.name))
138 | print("First topic query: {}".format(first_topic.query.text))
139 | print("First topic image url: {}".format(first_topic.image.url))
140 | print("First topic webSearchUrl: {}".format(
141 | first_topic.web_search_url))
142 | print("First topic newsSearchUrl: {}".format(
143 | first_topic.news_search_url))
144 | else:
145 | print("Didn't see any topics result data..")
146 |
147 | except Exception as err:
148 | print("Encountered exception. {}".format(err))
149 |
150 |
151 | if __name__ == "__main__":
152 | import sys, os.path
153 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
154 | from samples.tools import execute_samples
155 | execute_samples(globals(), 'BING_SEARCH_V7_SUBSCRIPTION_KEY')
156 |
--------------------------------------------------------------------------------
/samples/search/video_search_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from azure.cognitiveservices.search.videosearch import VideoSearchClient
4 | from azure.cognitiveservices.search.videosearch.models import VideoPricing, VideoLength, VideoResolution, VideoInsightModule
5 | from msrest.authentication import CognitiveServicesCredentials
6 |
7 | # Add your Bing Search V7 subscription key to your environment variables.
8 | SUBSCRIPTION_KEY = os.environ['BING_SEARCH_V7_SUBSCRIPTION_KEY']
9 | ENDPOINT = os.environ['BING_SEARCH_V7_ENDPOINT']
10 |
11 | def video_search(subscription_key):
12 | """VideoSearch.
13 |
14 | This will search videos for (SwiftKey) then verify number of results and print out id, name and url of first video result.
15 | """
16 | client = VideoSearchClient(ENDPOINT, CognitiveServicesCredentials(subscription_key))
17 |
18 | try:
19 | video_result = client.videos.search(query="SwiftKey")
20 | print("Search videos for query \"SwiftKey\"")
21 |
22 | if video_result.value:
23 | first_video_result = video_result.value[0]
24 | print("Video result count: {}".format(len(video_result.value)))
25 | print("First video id: {}".format(first_video_result.video_id))
26 | print("First video name: {}".format(first_video_result.name))
27 | print("First video url: {}".format(first_video_result.content_url))
28 | else:
29 | print("Didn't see any video result data..")
30 |
31 | except Exception as err:
32 | print("Encountered exception. {}".format(err))
33 |
34 |
35 | def video_search_with_filtering(subscription_key):
36 | """VideoSearchWithFilters.
37 |
38 | This will search videos for (Bellevue Trailer) that are free, short and in 1080p resolution, then verify the number of results and print out the id, name and url of the first video result.
39 | """
40 | client = VideoSearchClient(ENDPOINT, CognitiveServicesCredentials(subscription_key))
41 |
42 | try:
43 | video_result = client.videos.search(
44 | query="Bellevue Trailer",
45 | pricing=VideoPricing.free, # Can use the str "free" too
46 | length=VideoLength.short, # Can use the str "short" too
47 | resolution=VideoResolution.hd1080p # Can use the str "hd1080p" too
48 | )
49 | print("Search videos for query \"Bellevue Trailer\" that is free, short and 1080p resolution")
50 |
51 | if video_result.value:
52 | first_video_result = video_result.value[0]
53 | print("Video result count: {}".format(len(video_result.value)))
54 | print("First video id: {}".format(first_video_result.video_id))
55 | print("First video name: {}".format(first_video_result.name))
56 | print("First video url: {}".format(first_video_result.content_url))
57 | else:
58 | print("Didn't see any video result data..")
59 |
60 | except Exception as err:
61 | print("Encountered exception. {}".format(err))
62 |
63 |
64 | def video_trending(subscription_key):
65 | """VideoTrending.
66 |
67 | This will get trending videos, then verify banner tiles and categories.
68 | """
69 | client = VideoSearchClient(ENDPOINT, CognitiveServicesCredentials(subscription_key))
70 |
71 | try:
72 | trending_result = client.videos.trending()
73 | print("Search trending video")
74 |
75 | # Banner tiles
76 | if trending_result.banner_tiles:
77 | first_banner_tile = trending_result.banner_tiles[0]
78 | print("Banner tile count: {}".format(
79 | len(trending_result.banner_tiles)))
80 | print("First banner tile text: {}".format(
81 | first_banner_tile.query.text))
82 | print("First banner tile url: {}".format(
83 | first_banner_tile.query.web_search_url))
84 | else:
85 | print("Couldn't find banner tiles!")
86 |
87 | # Categories
88 | if trending_result.categories:
89 | first_category = trending_result.categories[0]
90 | print("Category count: {}".format(len(trending_result.categories)))
91 | print("First category title: {}".format(first_category.title))
92 | if first_category.subcategories:
93 | first_subcategory = first_category.subcategories[0]
94 | print("Subcategory count: {}".format(
95 | len(first_category.subcategories)))
96 | print("First subcategory title: {}".format(
97 | first_subcategory.title))
98 | if first_subcategory.tiles:
99 | first_tile = first_subcategory.tiles[0]
100 | print("Subcategory tile count: {}".format(
101 | len(first_subcategory.tiles)))
102 | print("First tile text: {}".format(first_tile.query.text))
103 | print("First tile url: {}".format(
104 | first_tile.query.web_search_url))
105 | else:
106 | print("Couldn't find subcategory tiles!")
107 | else:
108 | print("Couldn't find subcategories!")
109 | else:
110 | print("Couldn't find categories!")
111 |
112 | except Exception as err:
113 | print("Encountered exception. {}".format(err))
114 |
115 |
116 | def video_detail(subscription_key):
117 | """VideoDetail.
118 |
119 | This will search videos for (Bellevue Trailer) and then search for detail information of the first video
120 | """
121 | client = VideoSearchClient(ENDPOINT, CognitiveServicesCredentials(subscription_key))
122 |
123 | try:
124 | video_result = client.videos.search(query="Bellevue Trailer")
125 | first_video_result = video_result.value[0]
126 |
127 | video_details = client.videos.details(
128 | query="Bellevue Trailer",
129 | id=first_video_result.video_id,
130 | modules=[VideoInsightModule.all] # Can use ["all"] too
131 | )
132 | print("Search detail for video id={}, name={}".format(
133 | first_video_result.video_id,
134 | first_video_result.name
135 | ))
136 |
137 | if video_details.video_result:
138 | print("Expected Video id: {}".format(
139 | video_details.video_result.video_id))
140 | print("Expected Video name: {}".format(
141 | video_details.video_result.name))
142 | print("Expected Video url: {}".format(
143 | video_details.video_result.content_url))
144 | else:
145 | print("Couldn't find expected video")
146 |
147 | if video_details.related_videos.value:
148 | first_related_video = video_details.related_videos.value[0]
149 | print("Related video count: {}".format(
150 | len(video_details.related_videos.value)))
151 | print("First related video id: {}".format(
152 | first_related_video.video_id))
153 | print("First related video name: {}".format(
154 | first_related_video.name))
155 | print("First related video content url: {}".format(
156 | first_related_video.content_url))
157 | else:
158 | print("Couldn't find any related video!")
159 |
160 | except Exception as err:
161 | print("Encountered exception. {}".format(err))
162 |
163 |
164 | if __name__ == "__main__":
165 | import sys, os.path
166 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
167 | from samples.tools import execute_samples
168 | execute_samples(globals(), 'BING_SEARCH_V7_SUBSCRIPTION_KEY')
169 |
--------------------------------------------------------------------------------
/samples/search/web_search_samples.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from azure.cognitiveservices.search.websearch import WebSearchClient
4 | from azure.cognitiveservices.search.websearch.models import SafeSearch
5 | from msrest.authentication import CognitiveServicesCredentials
6 |
7 | # Add your Bing Search V7 subscription key to your environment variables.
8 | SUBSCRIPTION_KEY = os.environ['BING_SEARCH_V7_SUBSCRIPTION_KEY']
9 | ENDPOINT = os.environ['BING_SEARCH_V7_ENDPOINT']
10 |
11 | # Comment out these lines if you want to see the SDK's logging output
12 | logger = logging.getLogger()  # get the root logger
13 | logger.setLevel(50)  # skip all logs below CRITICAL (50)
14 |
15 |
16 | def result_types_lookup(subscription_key):
17 | """WebSearchResultTypesLookup.
18 |
19 | This will look up a single query (Xbox) and print out name and url for first web, image, news and videos results.
20 | """
21 | client = WebSearchClient(ENDPOINT, CognitiveServicesCredentials(subscription_key))
22 |
23 | try:
24 |
25 | web_data = client.web.search(query="xbox")
26 | print("Searched for Query# \" Xbox \"")
27 |
28 | # WebPages
29 | if web_data.web_pages.value:
30 |
31 | print("Webpage Results#{}".format(len(web_data.web_pages.value)))
32 |
33 | first_web_page = web_data.web_pages.value[0]
34 | print("First web page name: {} ".format(first_web_page.name))
35 | print("First web page URL: {} ".format(first_web_page.url))
36 |
37 | else:
38 | print("Didn't see any Web data..")
39 |
40 | # Images
41 | if web_data.images.value:
42 |
43 | print("Image Results#{}".format(len(web_data.images.value)))
44 |
45 | first_image = web_data.images.value[0]
46 | print("First Image name: {} ".format(first_image.name))
47 | print("First Image URL: {} ".format(first_image.url))
48 |
49 | else:
50 | print("Didn't see any Image..")
51 |
52 | # News
53 | if web_data.news.value:
54 |
55 | print("News Results#{}".format(len(web_data.news.value)))
56 |
57 | first_news = web_data.news.value[0]
58 | print("First News name: {} ".format(first_news.name))
59 | print("First News URL: {} ".format(first_news.url))
60 |
61 | else:
62 | print("Didn't see any News..")
63 |
64 | # Videos
65 | if web_data.videos.value:
66 |
67 | print("Videos Results#{}".format(len(web_data.videos.value)))
68 |
69 | first_video = web_data.videos.value[0]
70 | print("First Videos name: {} ".format(first_video.name))
71 | print("First Videos URL: {} ".format(first_video.url))
72 |
73 | else:
74 | print("Didn't see any Videos..")
75 |
76 | except Exception as err:
77 | print("Encountered exception. {}".format(err))
78 |
79 |
80 | def web_results_with_count_and_offset(subscription_key):
81 | """WebResultsWithCountAndOffset.
82 |
83 | This will search (Best restaurants in Seattle), verify number of results and print out name and url of first result.
84 | """
85 |
86 | client = WebSearchClient(ENDPOINT, CognitiveServicesCredentials(subscription_key))
87 |
88 | try:
89 | web_data = client.web.search(
90 | query="Best restaurants in Seattle", offset=10, count=20)
91 | print("Searched for Query# \" Best restaurants in Seattle \"")
92 |
93 | if web_data.web_pages.value:
94 |
95 | print("Webpage Results#{}".format(len(web_data.web_pages.value)))
96 |
97 | first_web_page = web_data.web_pages.value[0]
98 | print("First web page name: {} ".format(first_web_page.name))
99 | print("First web page URL: {} ".format(first_web_page.url))
100 |
101 | else:
102 | print("Didn't see any Web data..")
103 |
104 | except Exception as err:
105 | print("Encountered exception. {}".format(err))
106 |
107 |
108 | def web_search_with_response_filter(subscription_key):
109 | """WebSearchWithResponseFilter.
110 |
111 | This will search (Microsoft) with response filters to news and print details of news.
112 | """
113 |
114 | client = WebSearchClient(ENDPOINT, CognitiveServicesCredentials(subscription_key))
115 |
116 | try:
117 | web_data = client.web.search(
118 | query="Microsoft", response_filter=["News"])
119 | print("Searched for Query# \" Microsoft \" with response filters \"News\"")
120 |
121 | # Use the news attribute since we filtered on "News"
122 | if web_data.news.value:
123 |
124 | print("Webpage Results#{}".format(len(web_data.news.value)))
125 |
126 | first_web_page = web_data.news.value[0]
127 | print("First web page name: {} ".format(first_web_page.name))
128 | print("First web page URL: {} ".format(first_web_page.url))
129 |
130 | else:
131 | print("Didn't see any Web data..")
132 |
133 | except Exception as err:
134 | print("Encountered exception. {}".format(err))
135 |
136 |
137 | def web_search_with_answer_count_promote_and_safe_search(subscription_key):
138 | """WebSearchWithAnswerCountPromoteAndSafeSearch.
139 |
140 | This will search (Lady Gaga) with answerCount and promote parameters and print details of answers.
141 | """
142 |
143 | client = WebSearchClient(ENDPOINT, CognitiveServicesCredentials(subscription_key))
144 |
145 | try:
146 | web_data = client.web.search(
147 | query="Lady Gaga",
148 | answer_count=2,
149 | promote=["videos"],
150 | safe_search=SafeSearch.strict # or directly "Strict"
151 | )
152 | print("Searched for Query# \" Lady Gaga\"")
153 |
154 | if web_data.web_pages.value:
155 |
156 | print("Webpage Results#{}".format(len(web_data.web_pages.value)))
157 |
158 | first_web_page = web_data.web_pages.value[0]
159 | print("First web page name: {} ".format(first_web_page.name))
160 | print("First web page URL: {} ".format(first_web_page.url))
161 |
162 | else:
163 | print("Didn't see any Web data..")
164 |
165 | except Exception as err:
166 | print("Encountered exception. {}".format(err))
167 |
168 |
169 | if __name__ == "__main__":
170 | import sys, os.path
171 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
172 | from samples.tools import execute_samples
173 | execute_samples(globals(), 'BING_SEARCH_V7_SUBSCRIPTION_KEY')
174 |
--------------------------------------------------------------------------------
/samples/tools.py:
--------------------------------------------------------------------------------
1 | """This file is just a sample launcher code.
2 |
3 | Nothing in it is related to Cognitive Services.
4 | """
5 | try:
6 | from inspect import getfullargspec as get_arg_spec
7 | except ImportError:
8 | from inspect import getargspec as get_arg_spec
9 | import os
10 | import sys
11 | import types
12 |
13 |
14 | class SubscriptionKeyError(Exception):
15 | pass
16 |
17 |
18 | def start_sample(func, subscription_key):
19 | """Start the function and show its doc on output.
20 | """
21 | print("Sample:", func.__doc__, "\n")
22 | func(subscription_key)
23 | print("\n\n")
24 |
25 |
26 | def execute_samples(module_globals, key_env_variable):
27 | """Execute samples based on a dict
28 | """
29 | try:
30 | subscription_key = sys.argv[1] if len(
31 | sys.argv) >= 2 else os.environ[key_env_variable]
32 | except KeyError:
33 | raise SubscriptionKeyError(
34 | "You need to either set the {} env variable.".format(key_env_variable))
35 |
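36 | # Call every module-level function that accepts a "subscription_key" argument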
36 | for func in list(module_globals.values()):
37 | if not isinstance(func, types.FunctionType):
38 | continue
39 | args = get_arg_spec(func).args
40 | if 'subscription_key' in args:
41 | start_sample(func, subscription_key)
42 |
--------------------------------------------------------------------------------
/samples/vision/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/__init__.py
--------------------------------------------------------------------------------
/samples/vision/computer_vision_extract_text.py:
--------------------------------------------------------------------------------
1 | from azure.cognitiveservices.vision.computervision import ComputerVisionClient
2 | from msrest.authentication import CognitiveServicesCredentials
3 | from azure.cognitiveservices.vision.computervision.models import TextRecognitionMode
4 | from azure.cognitiveservices.vision.computervision.models import TextOperationStatusCodes
5 | import time
6 |
7 | '''
8 | References:
9 | Quickstart: https://docs.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/python-sdk
10 | SDK: https://docs.microsoft.com/en-us/python/api/overview/azure/cognitiveservices/computervision?view=azure-python
11 | '''
12 |
13 | # Replace with your endpoint and key from the Azure portal
14 | endpoint = ''
15 | key = ''
16 |
17 | # Alternatively, uncomment and get endpoint/key from environment variables
18 | '''
19 | import os
20 | endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
21 | key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
22 | '''
23 |
24 | # Set credentials
25 | credentials = CognitiveServicesCredentials(key)
26 |
27 | # Create client
28 | client = ComputerVisionClient(endpoint, credentials)
29 |
30 | url = "https://azurecomcdn.azureedge.net/cvt-1979217d3d0d31c5c87cbd991bccfee2d184b55eeb4081200012bdaf6a65601a/images/shared/cognitive-services-demos/read-text/read-1-thumbnail.png"
31 | mode = TextRecognitionMode.handwritten
32 | raw = True
33 | custom_headers = None
34 | numberOfCharsInOperationId = 36
35 |
36 | # Async SDK call
37 | rawHttpResponse = client.batch_read_file(url, mode, custom_headers, raw)
38 |
39 | # Get ID from returned headers
40 | operationLocation = rawHttpResponse.headers["Operation-Location"]
41 | idLocation = len(operationLocation) - numberOfCharsInOperationId
42 | operationId = operationLocation[idLocation:]
43 |
44 | # Poll until the read operation completes
45 | while True:
46 | result = client.get_read_operation_result(operationId)
47 | if result.status not in ['NotStarted', 'Running']:
48 | break
49 | time.sleep(1)
50 |
51 | # Get data: displays text captured and its bounding box (position in the image)
52 | if result.status == TextOperationStatusCodes.succeeded:
53 | for textResult in result.recognition_results:
54 | for line in textResult.lines:
55 | print(line.text)
56 | print(line.bounding_box)
--------------------------------------------------------------------------------
/samples/vision/computer_vision_samples.py:
--------------------------------------------------------------------------------
1 | import os.path
2 |
3 | from azure.cognitiveservices.vision.computervision import ComputerVisionClient
4 | from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
5 | from msrest.authentication import CognitiveServicesCredentials
6 |
7 | SUBSCRIPTION_KEY_ENV_NAME = "COMPUTERVISION_SUBSCRIPTION_KEY"
8 | COMPUTERVISION_LOCATION = os.environ.get(
9 | "COMPUTERVISION_LOCATION", "westcentralus")
10 |
11 | IMAGES_FOLDER = os.path.join(os.path.dirname(
12 | os.path.realpath(__file__)), "images")
13 |
14 |
15 | def image_analysis_in_stream(subscription_key):
16 | """ImageAnalysisInStream.
17 |
18 | This will analyze an image from a stream and return all available features.
19 | """
20 | client = ComputerVisionClient(
21 | endpoint="https://" + COMPUTERVISION_LOCATION + ".api.cognitive.microsoft.com/",
22 | credentials=CognitiveServicesCredentials(subscription_key)
23 | )
24 |
25 | with open(os.path.join(IMAGES_FOLDER, "house.jpg"), "rb") as image_stream:
26 | image_analysis = client.analyze_image_in_stream(
27 | image=image_stream,
28 | visual_features=[
29 | VisualFeatureTypes.image_type, # Could use simple str "ImageType"
30 | VisualFeatureTypes.faces, # Could use simple str "Faces"
31 | VisualFeatureTypes.categories, # Could use simple str "Categories"
32 | VisualFeatureTypes.color, # Could use simple str "Color"
33 | VisualFeatureTypes.tags, # Could use simple str "Tags"
34 | VisualFeatureTypes.description # Could use simple str "Description"
35 | ]
36 | )
37 |
38 | print("This image can be described as: {}\n".format(
39 | image_analysis.description.captions[0].text))
40 |
41 | print("Tags associated with this image:\nTag\t\tConfidence")
42 | for tag in image_analysis.tags:
43 | print("{}\t\t{}".format(tag.name, tag.confidence))
44 |
45 | print("\nThe primary colors of this image are: {}".format(
46 | image_analysis.color.dominant_colors))
47 |
48 |
49 | def recognize_text(subscription_key):
50 | """RecognizeTextUsingRecognizeAPI.
51 |
52 | This will recognize text of the given image using the recognizeText API.
53 | """
54 | import time
55 | client = ComputerVisionClient(
56 | endpoint="https://" + COMPUTERVISION_LOCATION + ".api.cognitive.microsoft.com/",
57 | credentials=CognitiveServicesCredentials(subscription_key)
58 | )
59 |
60 | with open(os.path.join(IMAGES_FOLDER, "make_things_happen.jpg"), "rb") as image_stream:
61 | job = client.recognize_text_in_stream(
62 | image=image_stream,
63 | mode="Printed",
64 | raw=True
65 | )
66 | operation_id = job.headers['Operation-Location'].split('/')[-1]
67 |
68 | image_analysis = client.get_text_operation_result(operation_id)
69 | while image_analysis.status in ['NotStarted', 'Running']:
70 | time.sleep(1)
71 | image_analysis = client.get_text_operation_result(
72 | operation_id=operation_id)
73 |
74 | print("Job completion is: {}\n".format(image_analysis.status))
75 |
76 | print("Recognized:\n")
77 | lines = image_analysis.recognition_result.lines
78 | print(lines[0].words[0].text) # "make"
79 | print(lines[1].words[0].text) # "things"
80 | print(lines[2].words[0].text) # "happen"
81 |
82 |
83 | def recognize_printed_text_in_stream(subscription_key):
84 | """RecognizedPrintedTextUsingOCR_API.
85 |
86 | This will do an OCR analysis of the given image.
87 | """
88 | client = ComputerVisionClient(
89 | endpoint="https://" + COMPUTERVISION_LOCATION + ".api.cognitive.microsoft.com/",
90 | credentials=CognitiveServicesCredentials(subscription_key)
91 | )
92 |
93 | with open(os.path.join(IMAGES_FOLDER, "computer_vision_ocr.png"), "rb") as image_stream:
94 | image_analysis = client.recognize_printed_text_in_stream(
95 | image=image_stream,
96 | language="en"
97 | )
98 |
99 | lines = image_analysis.regions[0].lines
100 | print("Recognized:\n")
101 | for line in lines:
102 | line_text = " ".join([word.text for word in line.words])
103 | print(line_text)
104 |
105 |
106 | if __name__ == "__main__":
107 | import sys, os.path
108 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
109 | from samples.tools import execute_samples
110 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
111 |
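112 |
113 | # A minimal sketch (not called by the samples above) of wrapping the polling
114 | # loop from recognize_text() in a reusable helper with a timeout; it reuses
115 | # the same get_text_operation_result() call and status strings as above.
116 | def _wait_for_text_operation(client, operation_id, timeout_seconds=30):
117 |     import time
118 |     deadline = time.time() + timeout_seconds
119 |     result = client.get_text_operation_result(operation_id)
120 |     while result.status in ['NotStarted', 'Running']:
121 |         if time.time() > deadline:
122 |             raise TimeoutError("Operation {} did not finish in time.".format(operation_id))
123 |         time.sleep(1)
124 |         result = client.get_text_operation_result(operation_id)
125 |     return result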
--------------------------------------------------------------------------------
/samples/vision/content_moderator_image_job_samples.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from pprint import pprint
3 |
4 | from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
5 | from msrest.authentication import CognitiveServicesCredentials
6 |
7 | # Add your Azure Content Moderator subscription key to your environment variables.
8 | SUBSCRIPTION_KEY_ENV_NAME = "CONTENT_MODERATOR_SUBSCRIPTION_KEY"
9 |
10 | def image_review_jobs(subscription_key):
11 | """ImageReviewJobs.
12 |
13 | This will review an image using workflow and job.
14 | """
15 |
16 | # The moderation job will use this workflow that you defined earlier.
17 |     # See the quickstart article to learn how to set up custom workflows.
18 | # https://docs.microsoft.com/azure/cognitive-services/content-moderator/review-tool-user-guide/workflows
19 | workflow_name = "insert your workflow name here"
20 |
21 | # The name of the team to assign the job to.
22 | # This must be the team name you used to create your Content Moderator account. You can
23 | # retrieve your team name from the Content Moderator web site. Your team name is the Id
24 | # associated with your subscription.
25 | team_name = "insert your team name here"
26 |
27 | # An image with this text:
28 | # IF WE DID ALL THE THINGS WE ARE CAPABLE OF DOING, WE WOULD LITERALLY ASTOUND OURSELVE
29 |     # Be sure your workflow creates a review for this image (e.g. the OCR text contains certain words).
30 | image_url = "https://moderatorsampleimages.blob.core.windows.net/samples/sample2.jpg"
31 |
32 | # Where you want to receive the approval/refuse event. This is the only way to get this information.
33 | call_back_endpoint = "https://requestb.in/1l64pe71"
34 |
35 | client = ContentModeratorClient(
36 | endpoint=os.environ['CONTENT_MODERATOR_ENDPOINT'], # Add your Content Moderator endpoint to your environment variables.
37 | credentials=CognitiveServicesCredentials(subscription_key)
38 | )
39 |
40 | print("Create moderation job for an image.\n")
41 | job_result = client.reviews.create_job(
42 | team_name=team_name,
43 | content_type="Image", # Possible values include: 'Image', 'Text', 'Video'
44 | content_id="ContentID", # Id/Name to identify the content submitted.
45 | workflow_name=workflow_name,
46 | # Possible values include: 'application/json', 'image/jpeg'
47 | job_content_type="application/json",
48 | content_value=image_url,
49 | call_back_endpoint=call_back_endpoint
50 | )
51 | job_id = job_result.job_id
52 |
53 | print("Get job status before review.")
54 | job_details = client.reviews.get_job_details(
55 | team_name=team_name,
56 | job_id=job_id,
57 | )
58 | pprint(job_details.as_dict())
59 |
60 | input("\nPerform manual reviews on the Content Moderator Review Site, and hit enter here.")
61 | job_details = client.reviews.get_job_details(
62 | team_name=team_name,
63 | job_id=job_id,
64 | )
65 | pprint(job_details.as_dict())
66 |
67 | # Your call back endpoint should have received an event like this:
68 | # {'call_back_endpoint': 'https://requestb.in/1l64pe71',
69 | # 'id': '201901d49ee1a417ae45a991c5c1d6af25cace',
70 | # 'job_execution_report': [{'msg': 'Posted results to the Callbackendpoint: '
71 | # 'https://requestb.in/1l64pe71',
72 | # 'ts': '2019-01-11T00:00:15.738452'},
73 | # {'msg': 'Job marked completed and job content has '
74 | # 'been removed',
75 | # 'ts': '2019-01-11T00:00:15.6583757'},
76 | # {'msg': 'Execution Complete',
77 | # 'ts': '2019-01-11T00:00:15.4872128'},
78 | # {'msg': 'Successfully got hasText response from '
79 | # 'Moderator',
80 | # 'ts': '2019-01-11T00:00:14.1389317'},
81 | # {'msg': 'Getting hasText from Moderator',
82 | # 'ts': '2019-01-11T00:00:13.0689178'},
83 | # {'msg': 'Starting Execution - Try 1',
84 | # 'ts': '2019-01-11T00:00:12.1120066'}],
85 | # 'result_meta_data': [{'key': 'hasText', 'value': 'True'},
86 | # {'key': 'ocrText',
87 | # 'value': 'IF WE DID \r\n'
88 | # 'ALL \r\n'
89 | # 'THE THINGS \r\n'
90 | # 'WE ARE \r\n'
91 | # 'CAPABLE \r\n'
92 | # 'OF DOING, \r\n'
93 | # 'WE WOULD \r\n'
94 | # 'LITERALLY \r\n'
95 | # 'ASTOUND \r\n'
96 | # 'OURSELVE \r\n'}],
97 | # 'review_id': '201901i6e4de824b0cf4aa587ac37f922f584c2',
98 | # 'status': 'Complete',
99 | # 'team_name': 'cspythonsdk',
100 | # 'type': 'Image',
101 |     #  'workflow_id': 'textdetection'}
102 |
103 |
104 | if __name__ == "__main__":
105 | import sys, os.path
106 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
107 | from samples.tools import execute_samples
108 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
109 |
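110 |
111 | # A minimal sketch (not called above) of polling the job instead of waiting
112 | # for manual input; it reuses get_job_details() and assumes the job reaches
113 | # the 'Complete' status shown in the sample callback output above.
114 | def _wait_for_job(client, team_name, job_id, timeout_seconds=300):
115 |     import time
116 |     deadline = time.time() + timeout_seconds
117 |     while time.time() < deadline:
118 |         job_details = client.reviews.get_job_details(team_name=team_name, job_id=job_id)
119 |         if job_details.status == "Complete":
120 |             return job_details
121 |         time.sleep(5)
122 |     raise TimeoutError("Job {} did not complete in time.".format(job_id))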
--------------------------------------------------------------------------------
/samples/vision/content_moderator_image_list_samples.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from pprint import pprint
3 | import time
4 |
5 | from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
6 | from azure.cognitiveservices.vision.contentmoderator.models import (
7 | APIErrorException,
8 | ImageList,
9 | ImageIds,
10 | Image,
11 | RefreshIndex,
12 | MatchResponse
13 | )
14 | from msrest.authentication import CognitiveServicesCredentials
15 |
16 | # Add your Azure Content Moderator subscription key to your environment variables.
17 | SUBSCRIPTION_KEY_ENV_NAME = "CONTENT_MODERATOR_SUBSCRIPTION_KEY"
18 |
19 | # The number of minutes to delay after updating the search index before
20 | # performing image match operations against the list.
21 | LATENCY_DELAY = 0.5
22 |
23 | IMAGE_LIST = {
24 | "Sports": [
25 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample4.png",
26 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample6.png",
27 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample9.png"
28 | ],
29 | "Swimsuit": [
30 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample1.jpg",
31 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample3.png",
32 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample4.png",
33 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample16.png"
34 | ]
35 | }
36 |
37 | IMAGES_TO_MATCH = [
38 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample1.jpg",
39 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample4.png",
40 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample5.png",
41 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample16.png"
42 | ]
43 |
44 |
45 | def image_lists(subscription_key):
46 | """ImageList.
47 |
48 |     This will create an image list, add images to it, and match images against the list.
49 | """
50 |
51 | client = ContentModeratorClient(
52 | endpoint=os.environ['CONTENT_MODERATOR_ENDPOINT'], # Add your Content Moderator endpoint to your environment variables.
53 |
54 | credentials=CognitiveServicesCredentials(subscription_key)
55 | )
56 |
57 | print("Creating list MyList\n")
58 | custom_list = client.list_management_image_lists.create(
59 | content_type="application/json",
60 | body={
61 | "name": "MyList",
62 | "description": "A sample list",
63 | "metadata": {
64 | "key_one": "Acceptable",
65 | "key_two": "Potentially racy"
66 | }
67 | }
68 | )
69 | print("List created:")
70 | assert isinstance(custom_list, ImageList)
71 | pprint(custom_list.as_dict())
72 | list_id = custom_list.id
73 |
74 | #
75 | # Add images
76 | #
77 |
78 | def add_images(list_id, image_url, label):
79 | """Generic add_images from url and label."""
80 | print("\nAdding image {} to list {} with label {}.".format(
81 | image_url, list_id, label))
82 | try:
83 | added_image = client.list_management_image.add_image_url_input(
84 | list_id=list_id,
85 | content_type="application/json",
86 | data_representation="URL",
87 | value=image_url,
88 | label=label
89 | )
90 | except APIErrorException as err:
91 |             # sample4 will fail: it appears under both labels above, and the same image cannot be added to a list twice
92 | print("Unable to add image to list: {}".format(err))
93 | else:
94 | assert isinstance(added_image, Image)
95 | pprint(added_image.as_dict())
96 | return added_image
97 |
98 | print("\nAdding images to list {}".format(list_id))
99 | index = {} # Keep an index url to id for later removal
100 | for label, urls in IMAGE_LIST.items():
101 | for url in urls:
102 | image = add_images(list_id, url, label)
103 | if image:
104 | index[url] = image.content_id
105 |
106 | #
107 | # Get all images ids
108 | #
109 | print("\nGetting all image IDs for list {}".format(list_id))
110 | image_ids = client.list_management_image.get_all_image_ids(list_id=list_id)
111 | assert isinstance(image_ids, ImageIds)
112 | pprint(image_ids.as_dict())
113 |
114 | #
115 | # Update list details
116 | #
117 | print("\nUpdating details for list {}".format(list_id))
118 | updated_list = client.list_management_image_lists.update(
119 | list_id=list_id,
120 | content_type="application/json",
121 | body={
122 | "name": "Swimsuits and sports"
123 | }
124 | )
125 | assert isinstance(updated_list, ImageList)
126 | pprint(updated_list.as_dict())
127 |
128 | #
129 | # Get list details
130 | #
131 | print("\nGetting details for list {}".format(list_id))
132 | list_details = client.list_management_image_lists.get_details(
133 | list_id=list_id)
134 | assert isinstance(list_details, ImageList)
135 | pprint(list_details.as_dict())
136 |
137 | #
138 | # Refresh the index
139 | #
140 | print("\nRefreshing the search index for list {}".format(list_id))
141 | refresh_index = client.list_management_image_lists.refresh_index_method(
142 | list_id=list_id)
143 | assert isinstance(refresh_index, RefreshIndex)
144 | pprint(refresh_index.as_dict())
145 |
146 | print("\nWaiting {} minutes to allow the server time to propagate the index changes.".format(
147 | LATENCY_DELAY))
148 | time.sleep(LATENCY_DELAY * 60)
149 |
150 | #
151 | # Match images against the image list.
152 | #
153 | for image_url in IMAGES_TO_MATCH:
154 | print("\nMatching image {} against list {}".format(image_url, list_id))
155 | match_result = client.image_moderation.match_url_input(
156 | content_type="application/json",
157 | list_id=list_id,
158 | data_representation="URL",
159 | value=image_url,
160 | )
161 | assert isinstance(match_result, MatchResponse)
162 | print("Is match? {}".format(match_result.is_match))
163 | print("Complete match details:")
164 | pprint(match_result.as_dict())
165 |
166 | #
167 | # Remove images
168 | #
169 | correction = "https://moderatorsampleimages.blob.core.windows.net/samples/sample16.png"
170 | print("\nRemove image {} from list {}".format(correction, list_id))
171 | client.list_management_image.delete_image(
172 | list_id=list_id,
173 | image_id=index[correction]
174 | )
175 |
176 | #
177 | # Refresh the index
178 | #
179 | print("\nRefreshing the search index for list {}".format(list_id))
180 | client.list_management_image_lists.refresh_index_method(list_id=list_id)
181 |
182 | print("\nWaiting {} minutes to allow the server time to propagate the index changes.".format(
183 | LATENCY_DELAY))
184 | time.sleep(LATENCY_DELAY * 60)
185 |
186 | #
187 | # Re-match
188 | #
189 | print("\nMatching image. The removed image should not match")
190 | for image_url in IMAGES_TO_MATCH:
191 | print("\nMatching image {} against list {}".format(image_url, list_id))
192 | match_result = client.image_moderation.match_url_input(
193 | content_type="application/json",
194 | list_id=list_id,
195 | data_representation="URL",
196 | value=image_url,
197 | )
198 | assert isinstance(match_result, MatchResponse)
199 | print("Is match? {}".format(match_result.is_match))
200 | print("Complete match details:")
201 | pprint(match_result.as_dict())
202 |
203 | #
204 | # Delete all images
205 | #
206 | print("\nDelete all images in the image list {}".format(list_id))
207 | client.list_management_image.delete_all_images(list_id=list_id)
208 |
209 | #
210 | # Delete list
211 | #
212 | print("\nDelete the image list {}".format(list_id))
213 | client.list_management_image_lists.delete(list_id=list_id)
214 |
215 | #
216 | # Get all list ids
217 | #
218 | print("\nVerify that the list {} was deleted.".format(list_id))
219 | image_lists = client.list_management_image_lists.get_all_image_lists()
220 | assert not any(list_id == image_list.id for image_list in image_lists)
221 |
222 |
223 | if __name__ == "__main__":
224 | import sys, os.path
225 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
226 | from samples.tools import execute_samples
227 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
228 |
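229 |
230 | # A minimal sketch of the refresh-then-wait pattern used twice above, factored
231 | # into a helper; it reuses the module-level LATENCY_DELAY constant.
232 | def _refresh_and_wait(client, list_id):
233 |     print("\nRefreshing the search index for list {}".format(list_id))
234 |     client.list_management_image_lists.refresh_index_method(list_id=list_id)
235 |     print("Waiting {} minutes for the index changes to propagate.".format(LATENCY_DELAY))
236 |     time.sleep(LATENCY_DELAY * 60)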
--------------------------------------------------------------------------------
/samples/vision/content_moderator_image_moderation_samples.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from pprint import pprint
3 |
4 | from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
5 | from azure.cognitiveservices.vision.contentmoderator.models import (
6 | Evaluate,
7 | OCR,
8 | FoundFaces
9 | )
10 | from msrest.authentication import CognitiveServicesCredentials
11 |
12 | # Add your Azure Content Moderator subscription key to your environment variables.
13 | SUBSCRIPTION_KEY_ENV_NAME = "CONTENT_MODERATOR_SUBSCRIPTION_KEY"
14 |
15 | IMAGE_LIST = [
16 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample2.jpg",
17 | "https://moderatorsampleimages.blob.core.windows.net/samples/sample5.png"
18 | ]
19 |
20 |
21 | def image_moderation(subscription_key):
22 | """ImageModeration.
23 |
24 |     This will evaluate images for adult and racy content, extract text via OCR, and detect faces.
25 | """
26 |
27 | client = ContentModeratorClient(
28 | endpoint=os.environ['CONTENT_MODERATOR_ENDPOINT'], # Add your Content Moderator endpoint to your environment variables.
29 | credentials=CognitiveServicesCredentials(subscription_key)
30 | )
31 |
32 | for image_url in IMAGE_LIST:
33 | print("\nEvaluate image {}".format(image_url))
34 |
35 | print("\nEvaluate for adult and racy content.")
36 | evaluation = client.image_moderation.evaluate_url_input(
37 | content_type="application/json",
38 | cache_image=True,
39 | data_representation="URL",
40 | value=image_url
41 | )
42 | assert isinstance(evaluation, Evaluate)
43 | pprint(evaluation.as_dict())
44 |
45 | print("\nDetect and extract text.")
46 | evaluation = client.image_moderation.ocr_url_input(
47 | language="eng",
48 | content_type="application/json",
49 | data_representation="URL",
50 | value=image_url,
51 | cache_image=True,
52 | )
53 | assert isinstance(evaluation, OCR)
54 | pprint(evaluation.as_dict())
55 |
56 | print("\nDetect faces.")
57 | evaluation = client.image_moderation.find_faces_url_input(
58 | content_type="application/json",
59 | cache_image=True,
60 | data_representation="URL",
61 | value=image_url
62 | )
63 | assert isinstance(evaluation, FoundFaces)
64 | pprint(evaluation.as_dict())
65 |
66 |
67 | if __name__ == "__main__":
68 | import sys, os.path
69 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
70 | from samples.tools import execute_samples
71 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
72 |
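73 |
74 | # A minimal sketch (not called above) of acting on an evaluation instead of
75 | # just printing it. The attribute names below follow the Evaluate model
76 | # (adult/racy classification scores); verify them against your SDK version,
77 | # and note that the 0.5 threshold is an arbitrary example value.
78 | def _is_flagged(evaluation, threshold=0.5):
79 |     return (evaluation.adult_classification_score > threshold
80 |             or evaluation.racy_classification_score > threshold)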
--------------------------------------------------------------------------------
/samples/vision/content_moderator_image_review.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from pprint import pprint
3 | import uuid
4 |
5 | from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
6 | from msrest.authentication import CognitiveServicesCredentials
7 |
8 | # Add your Azure Content Moderator subscription key to your environment variables.
9 | SUBSCRIPTION_KEY_ENV_NAME = "CONTENT_MODERATOR_SUBSCRIPTION_KEY"
10 |
11 | def image_review(subscription_key):
12 | """ImageReview.
13 |
14 | This will create a review for images.
15 | """
16 |
17 | # The name of the team to assign the job to.
18 | # This must be the team name you used to create your Content Moderator account. You can
19 | # retrieve your team name from the Content Moderator web site. Your team name is the Id
20 | # associated with your subscription.
21 | team_name = "insert your team name here"
22 |
23 | # An image to review
24 | image_url = "https://moderatorsampleimages.blob.core.windows.net/samples/sample5.png"
25 |
26 | # Where you want to receive the approval/refuse event. This is the only way to get this information.
27 | call_back_endpoint = "https://requestb.in/qmsakwqm"
28 |
29 | client = ContentModeratorClient(
30 | endpoint=os.environ['CONTENT_MODERATOR_ENDPOINT'], # Add your Content Moderator endpoint to your environment variables.
31 | credentials=CognitiveServicesCredentials(subscription_key)
32 | )
33 |
34 | print("Create review for {}.\n".format(image_url))
35 | review_item = {
36 | "type": "Image", # Possible values include: 'Image', 'Text'
37 | "content": image_url, # How to download the image
38 | "content_id": uuid.uuid4(), # Random id
39 | "callback_endpoint": call_back_endpoint,
40 | "metadata": [{
41 | "key": "sc",
42 | "value": True # will be sent to Azure as "str" cast.
43 | }]
44 | }
45 |
46 | reviews = client.reviews.create_reviews(
47 | url_content_type="application/json",
48 | team_name=team_name,
49 |         create_review_body=[review_item]  # As many review items as you need
50 | )
51 |     review_id = reviews[0]  # Ordered list of review ID strings
52 |
53 | print("\nGet review details")
54 | review_details = client.reviews.get_review(
55 | team_name=team_name, review_id=review_id)
56 | pprint(review_details.as_dict())
57 |
58 | input("\nPerform manual reviews on the Content Moderator Review Site, and hit enter here.")
59 |
60 | print("\nGet review details")
61 | review_details = client.reviews.get_review(
62 | team_name=team_name, review_id=review_id)
63 | pprint(review_details.as_dict())
64 |
65 | # Your call back endpoint should have received an event like this:
66 | # {'callback_endpoint': 'https://requestb.in/qmsakwqm',
67 | # 'content': '',
68 | # 'content_id': '3ebe16cb-31ed-4292-8b71-1dfe9b0e821f',
69 | # 'created_by': 'cspythonsdk',
70 | # 'metadata': [{'key': 'sc', 'value': 'True'}],
71 | # 'review_id': '201901i14682e2afe624fee95ebb248643139e7',
72 | # 'reviewer_result_tags': [{'key': 'a', 'value': 'True'},
73 | # {'key': 'r', 'value': 'True'}],
74 | # 'status': 'Complete',
75 | # 'sub_team': 'public',
76 | # 'type': 'Image'}
77 |
78 |
79 | if __name__ == "__main__":
80 | import sys, os.path
81 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
82 | from samples.tools import execute_samples
83 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
84 |
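85 |
86 | # A minimal sketch (not called above) of polling the review instead of waiting
87 | # for manual input; it assumes the review eventually reaches the 'Complete'
88 | # status shown in the callback payload above.
89 | def _wait_for_review(client, team_name, review_id, timeout_seconds=300):
90 |     import time
91 |     deadline = time.time() + timeout_seconds
92 |     while time.time() < deadline:
93 |         review = client.reviews.get_review(team_name=team_name, review_id=review_id)
94 |         if review.status == "Complete":
95 |             return review
96 |         time.sleep(5)
97 |     raise TimeoutError("Review {} was not completed in time.".format(review_id))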
--------------------------------------------------------------------------------
/samples/vision/content_moderator_term_list_samples.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from pprint import pprint
3 | import time
4 |
5 | from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
6 | from azure.cognitiveservices.vision.contentmoderator.models import (
7 | TermList,
8 | Terms,
9 | TermsData,
10 | RefreshIndex,
11 | Screen
12 | )
13 | from msrest.authentication import CognitiveServicesCredentials
14 |
15 | # Add your Azure Content Moderator subscription key to your environment variables.
16 | SUBSCRIPTION_KEY_ENV_NAME = "CONTENT_MODERATOR_SUBSCRIPTION_KEY"
17 |
18 | TEXT_FOLDER = os.path.join(os.path.dirname(
19 | os.path.realpath(__file__)), "text_files")
20 |
21 | # The number of minutes to delay after updating the search index before
22 | # performing text screening operations against the list.
23 | LATENCY_DELAY = 0.5
24 |
25 |
26 | def terms_lists(subscription_key):
27 | """TermsList.
28 |
29 | This will screen text using a term list.
30 | """
31 |
32 | client = ContentModeratorClient(
33 | endpoint=os.environ['CONTENT_MODERATOR_ENDPOINT'], # Add your Content Moderator endpoint to your environment variables.
34 | credentials=CognitiveServicesCredentials(subscription_key)
35 | )
36 |
37 | #
38 | # Create list
39 | #
40 |
41 | print("\nCreating list")
42 | custom_list = client.list_management_term_lists.create(
43 | content_type="application/json",
44 | body={
45 | "name": "Term list name",
46 | "description": "Term list description",
47 | }
48 | )
49 | print("List created:")
50 | assert isinstance(custom_list, TermList)
51 | pprint(custom_list.as_dict())
52 | list_id = custom_list.id
53 |
54 | #
55 | # Update list details
56 | #
57 | print("\nUpdating details for list {}".format(list_id))
58 | updated_list = client.list_management_term_lists.update(
59 | list_id=list_id,
60 | content_type="application/json",
61 | body={
62 | "name": "New name",
63 | "description": "New description"
64 | }
65 | )
66 | assert isinstance(updated_list, TermList)
67 | pprint(updated_list.as_dict())
68 |
69 | #
70 | # Add terms
71 | #
72 | print("\nAdding terms to list {}".format(list_id))
73 | client.list_management_term.add_term(
74 | list_id=list_id,
75 | term="term1",
76 | language="eng"
77 | )
78 | client.list_management_term.add_term(
79 | list_id=list_id,
80 | term="term2",
81 | language="eng"
82 | )
83 |
84 | #
85 | # Get all terms ids
86 | #
87 | print("\nGetting all term IDs for list {}".format(list_id))
88 | terms = client.list_management_term.get_all_terms(
89 | list_id=list_id, language="eng")
90 | assert isinstance(terms, Terms)
91 | terms_data = terms.data
92 | assert isinstance(terms_data, TermsData)
93 | pprint(terms_data.as_dict())
94 |
95 | #
96 | # Refresh the index
97 | #
98 | print("\nRefreshing the search index for list {}".format(list_id))
99 | refresh_index = client.list_management_term_lists.refresh_index_method(
100 | list_id=list_id, language="eng")
101 | assert isinstance(refresh_index, RefreshIndex)
102 | pprint(refresh_index.as_dict())
103 |
104 | print("\nWaiting {} minutes to allow the server time to propagate the index changes.".format(
105 | LATENCY_DELAY))
106 | time.sleep(LATENCY_DELAY * 60)
107 |
108 | #
109 | # Screen text
110 | #
111 | text = 'This text contains the terms "term1" and "term2".'
112 | print('\nScreening text "{}" using term list {}'.format(text, list_id))
113 | with open(os.path.join(TEXT_FOLDER, 'content_moderator_term_list.txt'), "rb") as text_fd:
114 | screen = client.text_moderation.screen_text(
115 | text_content_type="text/plain",
116 | text_content=text_fd,
117 | language="eng",
118 | autocorrect=False,
119 | pii=False,
120 | list_id=list_id
121 | )
122 | assert isinstance(screen, Screen)
123 | pprint(screen.as_dict())
124 |
125 | #
126 | # Remove terms
127 | #
128 | term_to_remove = "term1"
129 | print("\nRemove term {} from list {}".format(term_to_remove, list_id))
130 | client.list_management_term.delete_term(
131 | list_id=list_id,
132 | term=term_to_remove,
133 | language="eng"
134 | )
135 |
136 | #
137 | # Refresh the index
138 | #
139 | print("\nRefreshing the search index for list {}".format(list_id))
140 | refresh_index = client.list_management_term_lists.refresh_index_method(
141 | list_id=list_id, language="eng")
142 | assert isinstance(refresh_index, RefreshIndex)
143 | pprint(refresh_index.as_dict())
144 |
145 | print("\nWaiting {} minutes to allow the server time to propagate the index changes.".format(
146 | LATENCY_DELAY))
147 | time.sleep(LATENCY_DELAY * 60)
148 |
149 | #
150 | # Re-Screen text
151 | #
152 | with open(os.path.join(TEXT_FOLDER, 'content_moderator_term_list.txt'), "rb") as text_fd:
153 | print('\nScreening text "{}" using term list {}'.format(text, list_id))
154 | screen = client.text_moderation.screen_text(
155 | text_content_type="text/plain",
156 | text_content=text_fd,
157 | language="eng",
158 | autocorrect=False,
159 | pii=False,
160 | list_id=list_id
161 | )
162 | assert isinstance(screen, Screen)
163 | pprint(screen.as_dict())
164 |
165 | #
166 | # Delete all terms
167 | #
168 | print("\nDelete all terms in the image list {}".format(list_id))
169 | client.list_management_term.delete_all_terms(
170 | list_id=list_id, language="eng")
171 |
172 | #
173 | # Delete list
174 | #
175 | print("\nDelete the term list {}".format(list_id))
176 | client.list_management_term_lists.delete(list_id=list_id)
177 |
178 |
179 | if __name__ == "__main__":
180 | import sys, os.path
181 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
182 | from samples.tools import execute_samples
183 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
184 |
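185 |
186 | # A minimal sketch of reading the matched terms out of a Screen result instead
187 | # of pprint-ing the whole payload; screen.terms can be None when nothing in
188 | # the text matched the term list.
189 | def _print_matched_terms(screen):
190 |     for match in screen.terms or []:
191 |         print("Matched term '{}' at index {}".format(match.term, match.index))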
--------------------------------------------------------------------------------
/samples/vision/content_moderator_text_moderation_samples.py:
--------------------------------------------------------------------------------
1 | import os, os.path
2 | from pprint import pprint
3 |
4 | from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
5 | from azure.cognitiveservices.vision.contentmoderator.models import (
6 | Screen
7 | )
8 | from msrest.authentication import CognitiveServicesCredentials
9 |
10 | # Add your Azure Content Moderator subscription key to your environment variables.
11 | SUBSCRIPTION_KEY_ENV_NAME = "CONTENT_MODERATOR_SUBSCRIPTION_KEY"
12 |
13 | TEXT_FOLDER = os.path.join(os.path.dirname(
14 | os.path.realpath(__file__)), "text_files")
15 |
19 |
20 |
21 | def text_moderation(subscription_key):
22 | """TextModeration.
23 |
24 | This will moderate a given long text.
25 | """
26 |
27 | client = ContentModeratorClient(
28 | endpoint=os.environ['CONTENT_MODERATOR_ENDPOINT'], # Add your Content Moderator endpoint to your environment variables.
29 | credentials=CognitiveServicesCredentials(subscription_key)
30 | )
31 |
32 | # Screen the input text: check for profanity,
33 | # do autocorrect text, and check for personally identifying
34 | # information (PII)
35 | with open(os.path.join(TEXT_FOLDER, 'content_moderator_text_moderation.txt'), "rb") as text_fd:
36 | screen = client.text_moderation.screen_text(
37 | text_content_type="text/plain",
38 | text_content=text_fd,
39 | language="eng",
40 | autocorrect=True,
41 | pii=True
42 | )
43 | assert isinstance(screen, Screen)
44 | pprint(screen.as_dict())
45 |
46 |
47 | if __name__ == "__main__":
48 | import sys, os.path
49 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
50 | from samples.tools import execute_samples
51 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
52 |
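53 |
54 | # A minimal sketch of picking individual fields off the Screen result above.
55 | # The attribute names (auto_corrected_text, pii, terms) follow the Screen
56 | # model; verify them against your SDK version before relying on this.
57 | def _summarize_screen(screen):
58 |     print("Autocorrected text: {}".format(screen.auto_corrected_text))
59 |     if screen.pii:
60 |         print("PII detected (emails: {}, phones: {})".format(
61 |             len(screen.pii.email or []), len(screen.pii.phone or [])))
62 |     for match in screen.terms or []:
63 |         print("Matched term '{}' at index {}".format(match.term, match.index))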
--------------------------------------------------------------------------------
/samples/vision/content_moderator_video_review.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from pprint import pprint
3 | from random import random
4 | import uuid
5 |
6 | from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
7 | from azure.cognitiveservices.vision.contentmoderator.models import Frames
8 | from msrest.authentication import CognitiveServicesCredentials
9 |
10 | # Add your Azure Content Moderator subscription key to your environment variables.
11 | SUBSCRIPTION_KEY_ENV_NAME = "CONTENT_MODERATOR_SUBSCRIPTION_KEY"
12 |
13 | def video_review(subscription_key):
14 | """VideoReview.
15 |
16 | This will create and publish a review for video
17 | """
18 |
19 | # The name of the team to assign the job to.
20 | # This must be the team name you used to create your Content Moderator account. You can
21 | # retrieve your team name from the Content Moderator web site. Your team name is the Id
22 | # associated with your subscription.
23 | team_name = "insert your team name here"
24 |
25 | # Create a review with the content pointing to a streaming endpoint (manifest)
26 | streamingcontent = "https://amssamples.streaming.mediaservices.windows.net/91492735-c523-432b-ba01-faba6c2206a2/AzureMediaServicesPromo.ism/manifest"
27 |
28 | frame1_url = "https://blobthebuilder.blob.core.windows.net/sampleframes/ams-video-frame1-00-17.PNG"
29 | frame2_url = "https://blobthebuilder.blob.core.windows.net/sampleframes/ams-video-frame-2-01-04.PNG"
30 | frame3_url = "https://blobthebuilder.blob.core.windows.net/sampleframes/ams-video-frame-3-02-24.PNG"
31 |
32 | client = ContentModeratorClient(
33 | endpoint=os.environ['CONTENT_MODERATOR_ENDPOINT'], # Add your Content Moderator endpoint to your environment variables.
34 | credentials=CognitiveServicesCredentials(subscription_key)
35 | )
36 |
37 | #
38 | # Create a video review
39 | #
40 | print("Create review for {}.\n".format(streamingcontent))
41 | review_item = {
42 | "content": streamingcontent, # How to download the image
43 | "content_id": uuid.uuid4(), # Random id
44 | # Note: to create a published review, set the Status to "Pending".
45 | # However, you cannot add video frames or a transcript to a published review.
46 | "status": "Unpublished"
47 | }
48 |
49 | reviews = client.reviews.create_video_reviews(
50 | content_type="application/json",
51 | team_name=team_name,
52 |         # As many review items as you need
53 | create_video_reviews_body=[review_item]
54 | )
55 |     review_id = reviews[0]  # Ordered list of review ID strings
56 |
57 | #
58 | # Add the frames from 17, 64, and 144 seconds.
59 | #
60 | print("\nAdding frames to the review {}".format(review_id))
61 |
62 | def create_frames_to_add_to_reviews(timestamp_seconds, url):
63 | return {
64 | 'timestamp': timestamp_seconds * 1000,
65 | 'frame_image': url,
66 | 'reviewer_result_tags': [
67 |                 # Note: all non-str values will be cast using str()
68 |                 {'key': 'reviewRecommended', 'value': True},
69 |                 {'key': 'adultScore', 'value': random()},
70 |                 {'key': 'a', 'value': False},
71 |                 {'key': 'racyScore', 'value': random()},
72 |                 {'key': 'r', 'value': False},
73 | ],
74 | 'metadata': [
75 |                 # Note: all non-str values will be cast using str()
76 | {'key': 'tag1', 'value': 'tag1'},
77 | ]
78 | }
79 |
80 | client.reviews.add_video_frame_url(
81 | content_type="application/json",
82 | team_name=team_name,
83 | review_id=review_id,
84 | video_frame_body=[
85 | create_frames_to_add_to_reviews(17, frame1_url),
86 | create_frames_to_add_to_reviews(64, frame2_url),
87 | create_frames_to_add_to_reviews(144, frame3_url)
88 | ]
89 | )
90 |
91 | #
92 | # Get frames
93 | #
94 | print("\nGetting frames for the review with ID {}".format(review_id))
95 | frames = client.reviews.get_video_frames(
96 | team_name=team_name,
97 | review_id=review_id,
98 | start_seed=0,
99 | no_of_records=100
100 | )
101 | assert isinstance(frames, Frames)
102 | pprint(frames.as_dict())
103 |
104 | #
105 | # Get reviews details
106 | #
107 | print("\nGetting review details for the review with ID {}".format(review_id))
108 | review_details = client.reviews.get_review(
109 | team_name=team_name, review_id=review_id)
110 | pprint(review_details.as_dict())
111 |
112 | #
113 |     # Publish the review
114 | #
115 | client.reviews.publish_video_review(
116 | team_name=team_name, review_id=review_id)
117 |
118 | print("\nOpen your Content Moderator Dashboard and select Review > Video to see the review.")
119 |
120 |
121 | if __name__ == "__main__":
122 | import sys, os.path
123 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
124 | from samples.tools import execute_samples
125 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
126 |
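127 |
128 | # A minimal sketch of the timestamp arithmetic used above: the review API takes
129 | # frame timestamps in milliseconds, so seconds are multiplied by 1000.
130 | def _seconds_to_review_timestamp(seconds):
131 |     return int(seconds * 1000)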
--------------------------------------------------------------------------------
/samples/vision/content_moderator_video_transcript_review.py:
--------------------------------------------------------------------------------
1 | from io import BytesIO
2 | import os, os.path
3 | from pprint import pprint
4 | from random import random
5 | import uuid
6 |
7 | from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
8 | from azure.cognitiveservices.vision.contentmoderator.models import Content, Review, Frames, Screen
9 | from msrest.authentication import CognitiveServicesCredentials
10 |
11 | # Add your Azure Content Moderator subscription key to your environment variables.
12 | SUBSCRIPTION_KEY_ENV_NAME = "CONTENT_MODERATOR_SUBSCRIPTION_KEY"
13 |
14 | TEXT_FOLDER = os.path.join(os.path.dirname(
15 | os.path.realpath(__file__)), "text_files")
16 |
17 |
18 | def video_transcript_review(subscription_key):
19 | """VideoTranscriptReview.
20 |
21 | This will create and publish a transcript review for video
22 | """
23 |
24 | # The name of the team to assign the job to.
25 | # This must be the team name you used to create your Content Moderator account. You can
26 | # retrieve your team name from the Content Moderator web site. Your team name is the Id
27 | # associated with your subscription.
28 | team_name = "insert your team name here"
29 |
30 | # Create a review with the content pointing to a streaming endpoint (manifest)
31 | streamingcontent = "https://amssamples.streaming.mediaservices.windows.net/91492735-c523-432b-ba01-faba6c2206a2/AzureMediaServicesPromo.ism/manifest"
32 |
33 | transcript = b"""WEBVTT
34 |
35 | 01:01.000 --> 02:02.000
36 | First line with a crap word in a transcript.
37 |
38 | 02:03.000 --> 02:25.000
39 | This is another line in the transcript.
40 | """
41 |
42 | client = ContentModeratorClient(
43 | endpoint=os.environ['CONTENT_MODERATOR_ENDPOINT'], # Add your Content Moderator endpoint to your environment variables.
44 | credentials=CognitiveServicesCredentials(subscription_key)
45 | )
46 |
47 | #
48 | # Create a video review
49 | #
50 | print("Create review for {}.\n".format(streamingcontent))
51 | review_item = {
52 | "content": streamingcontent, # How to download the image
53 | "content_id": uuid.uuid4(), # Random id
54 | # Note: to create a published review, set the Status to "Pending".
55 | # However, you cannot add video frames or a transcript to a published review.
56 | "status": "Unpublished"
57 | }
58 |
59 | reviews = client.reviews.create_video_reviews(
60 | content_type="application/json",
61 | team_name=team_name,
62 |         # As many review items as you need
63 | create_video_reviews_body=[review_item]
64 | )
65 |     review_id = reviews[0]  # Ordered list of review ID strings
66 |
67 | #
68 | # Add transcript
69 | #
70 | print("\nAdding transcript to the review {}".format(review_id))
71 | client.reviews.add_video_transcript(
72 | team_name=team_name,
73 | review_id=review_id,
74 |         # Can be a file descriptor, as long as it's a stream type
75 | vt_tfile=BytesIO(transcript),
76 | )
77 |
78 | #
79 | # Add transcript moderation result
80 | #
81 | print("\nAdding a transcript moderation result to the review with ID {}".format(review_id))
82 | with open(os.path.join(TEXT_FOLDER, 'content_moderator_video_transcript.txt'), "rb") as text_fd:
83 | screen = client.text_moderation.screen_text(
84 | text_content_type="text/plain",
85 | text_content=text_fd,
86 | language="eng"
87 | )
88 | assert isinstance(screen, Screen)
89 | pprint(screen.as_dict())
90 |
91 | # Build a terms list with index
92 | terms = []
93 | for term in screen.terms:
94 | terms.append({"index": term.index, "term": term.term})
95 |
96 | client.reviews.add_video_transcript_moderation_result(
97 | content_type="application/json",
98 | team_name=team_name,
99 | review_id=review_id,
100 | transcript_moderation_body=[{
101 | "timestamp": 0,
102 | "terms": terms
103 | }]
104 | )
105 |
106 | #
107 |     # Publish the review
108 | #
109 | client.reviews.publish_video_review(
110 | team_name=team_name, review_id=review_id)
111 |
112 | print("\nOpen your Content Moderator Dashboard and select Review > Video to see the review.")
113 |
114 |
115 | if __name__ == "__main__":
116 | import sys, os.path
117 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
118 | from samples.tools import execute_samples
119 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
120 |
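121 |
122 | # A minimal sketch of converting a WebVTT cue timestamp such as "01:01.000"
123 | # (MM:SS.mmm, as in the transcript above) into the millisecond timestamps
124 | # expected by the transcript moderation body.
125 | def _vtt_timestamp_to_ms(timestamp):
126 |     minutes, seconds = timestamp.split(":")
127 |     return int((int(minutes) * 60 + float(seconds)) * 1000)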
--------------------------------------------------------------------------------
/samples/vision/custom_vision_object_detection_sample.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 |
5 | from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
6 | from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry, Region
7 | from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
8 |
9 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
10 |
11 | # Replace with a valid key
12 | SUBSCRIPTION_KEY_ENV_NAME = "CUSTOMVISION_TRAINING_KEY"
13 | PREDICTION_RESOURCE_ID_KEY_ENV_NAME = "CUSTOMVISION_PREDICTION_ID"
14 | PREDICTION_KEY_ENV_NAME = "CUSTOMVISION_PREDICTION_KEY"
15 |
16 | PUBLISH_ITERATION_NAME = "classifyModel"
17 |
18 | ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"
19 |
20 | # Add this directory to the path so that custom_vision_training_samples can be found
21 | sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "."))
22 |
23 | IMAGES_FOLDER = os.path.join(os.path.dirname(
24 | os.path.realpath(__file__)), "images")
25 | class PredictionResourceMissingError(Exception):
26 |     pass
27 | def run_sample(subscription_key):
28 | try:
29 | prediction_key = os.environ[PREDICTION_KEY_ENV_NAME]
30 | except KeyError:
31 | from samples.tools import SubscriptionKeyError
32 | raise SubscriptionKeyError("You need to set the {} env variable.".format(PREDICTION_KEY_ENV_NAME))
33 |
34 | project, iteration = train_project(subscription_key)
35 | predict_project(prediction_key, project, iteration)
36 |
37 |
38 | def train_project(training_key):
39 | try:
40 | prediction_resource_id = os.environ[PREDICTION_RESOURCE_ID_KEY_ENV_NAME]
41 | except KeyError:
42 | raise PredictionResourceMissingError("Didn't find a prediction resource to publish to. Please set the {} environment variable".format(PREDICTION_RESOURCE_ID_KEY_ENV_NAME))
43 |
44 | trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)
45 |
46 | # Find the object detection domain
47 |
48 | obj_detection_domain = next(domain for domain in trainer.get_domains() if domain.type == "ObjectDetection" and domain.name == "General")
49 |
50 | # Create a new project
51 | print("Creating project...")
52 | project = trainer.create_project(
53 | "My Detection Project", domain_id=obj_detection_domain.id)
54 |
55 | # Make two tags in the new project
56 | fork_tag = trainer.create_tag(project.id, "fork")
57 | scissors_tag = trainer.create_tag(project.id, "scissors")
58 |
59 | fork_image_regions = {
60 | "fork_1": [0.145833328, 0.3509314, 0.5894608, 0.238562092],
61 | "fork_2": [0.294117659, 0.216944471, 0.534313738, 0.5980392],
62 | "fork_3": [0.09191177, 0.0682516545, 0.757352948, 0.6143791],
63 | "fork_4": [0.254901975, 0.185898721, 0.5232843, 0.594771266],
64 | "fork_5": [0.2365196, 0.128709182, 0.5845588, 0.71405226],
65 | "fork_6": [0.115196079, 0.133611143, 0.676470637, 0.6993464],
66 | "fork_7": [0.164215669, 0.31008172, 0.767156839, 0.410130739],
67 | "fork_8": [0.118872553, 0.318251669, 0.817401946, 0.225490168],
68 | "fork_9": [0.18259804, 0.2136765, 0.6335784, 0.643790841],
69 | "fork_10": [0.05269608, 0.282303959, 0.8088235, 0.452614367],
70 | "fork_11": [0.05759804, 0.0894935, 0.9007353, 0.3251634],
71 | "fork_12": [0.3345588, 0.07315363, 0.375, 0.9150327],
72 | "fork_13": [0.269607842, 0.194068655, 0.4093137, 0.6732026],
73 | "fork_14": [0.143382356, 0.218578458, 0.7977941, 0.295751631],
74 | "fork_15": [0.19240196, 0.0633497, 0.5710784, 0.8398692],
75 | "fork_16": [0.140931368, 0.480016381, 0.6838235, 0.240196079],
76 | "fork_17": [0.305147052, 0.2512582, 0.4791667, 0.5408496],
77 | "fork_18": [0.234068632, 0.445702642, 0.6127451, 0.344771236],
78 | "fork_19": [0.219362751, 0.141781077, 0.5919118, 0.6683006],
79 | "fork_20": [0.180147052, 0.239820287, 0.6887255, 0.235294119]
80 | }
81 |
82 | scissors_image_regions = {
83 | "scissors_1": [0.4007353, 0.194068655, 0.259803921, 0.6617647],
84 | "scissors_2": [0.426470578, 0.185898721, 0.172794119, 0.5539216],
85 | "scissors_3": [0.289215684, 0.259428144, 0.403186262, 0.421568632],
86 | "scissors_4": [0.343137264, 0.105833367, 0.332107842, 0.8055556],
87 | "scissors_5": [0.3125, 0.09766343, 0.435049027, 0.71405226],
88 | "scissors_6": [0.379901975, 0.24308826, 0.32107842, 0.5718954],
89 | "scissors_7": [0.341911763, 0.20714055, 0.3137255, 0.6356209],
90 | "scissors_8": [0.231617644, 0.08459154, 0.504901946, 0.8480392],
91 | "scissors_9": [0.170343131, 0.332957536, 0.767156839, 0.403594762],
92 | "scissors_10": [0.204656869, 0.120539248, 0.5245098, 0.743464053],
93 | "scissors_11": [0.05514706, 0.159754932, 0.799019635, 0.730392158],
94 | "scissors_12": [0.265931368, 0.169558853, 0.5061275, 0.606209159],
95 | "scissors_13": [0.241421565, 0.184264734, 0.448529422, 0.6830065],
96 | "scissors_14": [0.05759804, 0.05027781, 0.75, 0.882352948],
97 | "scissors_15": [0.191176474, 0.169558853, 0.6936275, 0.6748366],
98 | "scissors_16": [0.1004902, 0.279036, 0.6911765, 0.477124184],
99 | "scissors_17": [0.2720588, 0.131977156, 0.4987745, 0.6911765],
100 | "scissors_18": [0.180147052, 0.112369314, 0.6262255, 0.6666667],
101 | "scissors_19": [0.333333343, 0.0274019931, 0.443627447, 0.852941155],
102 | "scissors_20": [0.158088237, 0.04047389, 0.6691176, 0.843137264]
103 | }
104 |
105 | # Go through the data table above and create the images
106 | print("Adding images...")
107 | tagged_images_with_regions = []
108 |
109 | for file_name in fork_image_regions.keys():
110 | x, y, w, h = fork_image_regions[file_name]
111 | regions = [Region(tag_id=fork_tag.id, left=x,
112 | top=y, width=w, height=h)]
113 |
114 | with open(os.path.join(IMAGES_FOLDER, "fork", file_name + ".jpg"), mode="rb") as image_contents:
115 | tagged_images_with_regions.append(ImageFileCreateEntry(
116 | name=file_name, contents=image_contents.read(), regions=regions))
117 |
118 | for file_name in scissors_image_regions.keys():
119 | x, y, w, h = scissors_image_regions[file_name]
120 | regions = [Region(tag_id=scissors_tag.id, left=x,
121 | top=y, width=w, height=h)]
122 |
123 | with open(os.path.join(IMAGES_FOLDER, "scissors", file_name + ".jpg"), mode="rb") as image_contents:
124 | tagged_images_with_regions.append(ImageFileCreateEntry(
125 | name=file_name, contents=image_contents.read(), regions=regions))
126 |
127 | trainer.create_images_from_files(project.id, images=tagged_images_with_regions)
128 |
129 | print ("Training...")
130 |
131 | iteration = trainer.train_project(project.id)
132 | while (iteration.status != "Completed"):
133 | iteration = trainer.get_iteration(project.id, iteration.id)
134 | print("Training status: " + iteration.status)
135 | time.sleep(1)
136 |
137 |
138 |     # The iteration is now trained. Name and publish this iteration to a prediction endpoint
139 |     trainer.publish_iteration(project.id, iteration.id, PUBLISH_ITERATION_NAME, prediction_resource_id)
140 |     print("Done!")
141 |
145 |
146 | return project, iteration
147 |
148 |
149 | def predict_project(prediction_key, project, iteration):
150 | predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)
151 |
152 | # Open the sample image and get back the prediction results.
153 | with open(os.path.join(IMAGES_FOLDER, "Test", "test_od_image.jpg"), mode="rb") as test_data:
154 | results = predictor.detect_image(project.id, PUBLISH_ITERATION_NAME, test_data)
155 |
156 | # Display the results.
157 | for prediction in results.predictions:
158 | print("\t" + prediction.tag_name + ": {0:.2f}% bbox.left = {1:.2f}, bbox.top = {2:.2f}, bbox.width = {3:.2f}, bbox.height = {4:.2f}".format(prediction.probability * 100, prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height))
159 |
160 |
161 | if __name__ == "__main__":
162 | from samples.tools import execute_samples
163 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
164 |
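165 |
166 | # A minimal sketch of converting the normalized bounding boxes used above
167 | # (left, top, width, height, each in the 0-1 range) into pixel coordinates
168 | # for a given image size.
169 | def _to_pixel_box(left, top, width, height, image_width, image_height):
170 |     return (int(left * image_width), int(top * image_height),
171 |             int(width * image_width), int(height * image_height))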
--------------------------------------------------------------------------------
/samples/vision/custom_vision_prediction_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
5 | from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
6 |
7 | TRAINING_KEY_ENV_NAME = "CUSTOMVISION_TRAINING_KEY"
8 | SUBSCRIPTION_KEY_ENV_NAME = "CUSTOMVISION_PREDICTION_KEY"
9 |
10 | PUBLISH_ITERATION_NAME = "classifyModel"
11 |
12 | ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"
13 |
14 | # Add this directory to the path so that custom_vision_training_samples can be found
15 | sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "."))
16 |
17 | IMAGES_FOLDER = os.path.join(os.path.dirname(
18 | os.path.realpath(__file__)), "images")
19 |
20 |
21 | def find_or_train_project():
22 | try:
23 | training_key = os.environ[TRAINING_KEY_ENV_NAME]
24 | except KeyError:
25 | raise SubscriptionKeyError("You need to set the {} env variable.".format(TRAINING_KEY_ENV_NAME))
26 |
27 | # Use the training API to find the SDK sample project created from the training example.
28 | from custom_vision_training_samples import train_project, SAMPLE_PROJECT_NAME
29 | trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)
30 |
31 | for proj in trainer.get_projects():
32 | if (proj.name == SAMPLE_PROJECT_NAME):
33 | return proj
34 |
35 | # Or, if not found, we will run the training example to create it.
36 | return train_project(training_key)
37 |
38 |
39 | def predict_project(subscription_key):
40 | predictor = CustomVisionPredictionClient(
41 | subscription_key, endpoint=ENDPOINT)
42 |
43 | # Find or train a new project to use for prediction.
44 | project = find_or_train_project()
45 |
46 | with open(os.path.join(IMAGES_FOLDER, "Test", "test_image.jpg"), mode="rb") as test_data:
47 | results = predictor.classify_image(project.id, PUBLISH_ITERATION_NAME, test_data.read())
48 |
49 | # Display the results.
50 | for prediction in results.predictions:
51 | print("\t" + prediction.tag_name +
52 | ": {0:.2f}%".format(prediction.probability * 100))
53 |
54 |
55 | if __name__ == "__main__":
56 | import sys, os.path
57 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
58 | from samples.tools import execute_samples, SubscriptionKeyError
59 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
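60 |
61 | # A minimal sketch (not called above) of keeping only confident predictions
62 | # rather than printing them all; probability is the same field used above,
63 | # and the 0.8 threshold is an arbitrary example value.
64 | def _confident_predictions(results, threshold=0.8):
65 |     return [p for p in results.predictions if p.probability >= threshold]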
--------------------------------------------------------------------------------
/samples/vision/custom_vision_training_multiclass_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
5 | from azure.cognitiveservices.vision.customvision.training.models import Classifier
6 |
7 | SUBSCRIPTION_KEY_ENV_NAME = "CUSTOMVISION_TRAINING_KEY"
8 | PREDICTION_RESOURCE_ID_KEY_ENV_NAME = "CUSTOMVISION_PREDICTION_ID"
9 |
10 | SAMPLE_PROJECT_NAME = "Python SDK Sample"
11 |
12 | # The prediction resource can be found with your keys and is tied to the Prediction Key
13 | PREDICTION_RESOURCE_ID = "enter your prediction resource"
14 |
15 | PUBLISH_ITERATION_NAME = "classifyModel"
16 |
17 | ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"
18 |
19 | IMAGES_FOLDER = os.path.join(os.path.dirname(
20 | os.path.realpath(__file__)), "images")
21 |
22 | class PredictionResourceMissingError(Exception):
23 | pass
24 |
25 | def train_project(subscription_key):
26 | try:
27 | prediction_resource_id = os.environ[PREDICTION_RESOURCE_ID_KEY_ENV_NAME]
28 | except KeyError:
29 | raise PredictionResourceMissingError("Didn't find a prediction resource to publish to. Please set the {} environment variable".format(PREDICTION_RESOURCE_ID_KEY_ENV_NAME))
30 |
31 | trainer = CustomVisionTrainingClient(subscription_key, endpoint=ENDPOINT)
32 |
33 | # Create a new project
34 | print("Creating project...")
35 | project = trainer.create_project(
36 | SAMPLE_PROJECT_NAME, classification_type=Classifier.multiclass)
37 |
38 |     # Make four tags in the new project
39 | hemlock_tag = trainer.create_tag(project.id, "Hemlock")
40 | cherry_tag = trainer.create_tag(project.id, "Japanese Cherry")
41 | pine_needle_tag = trainer.create_tag(project.id, "Pine Needle Leaves")
42 | flat_leaf_tag = trainer.create_tag(project.id, "Flat Leaves")
43 |
44 | print("Adding images...")
45 | hemlock_dir = os.path.join(IMAGES_FOLDER, "Hemlock")
46 | for image in os.listdir(hemlock_dir):
47 | with open(os.path.join(hemlock_dir, image), mode="rb") as img_data:
48 | trainer.create_images_from_data(project.id, img_data.read(), [
49 | hemlock_tag.id, pine_needle_tag.id])
50 |
51 | cherry_dir = os.path.join(IMAGES_FOLDER, "Japanese Cherry")
52 | for image in os.listdir(cherry_dir):
53 | with open(os.path.join(cherry_dir, image), mode="rb") as img_data:
54 | trainer.create_images_from_data(project.id, img_data.read(), [
55 | cherry_tag.id, flat_leaf_tag.id])
56 |
57 | print("Training...")
58 | iteration = trainer.train_project(project.id)
59 | while (iteration.status == "Training"):
60 | iteration = trainer.get_iteration(project.id, iteration.id)
61 | print("Training status: " + iteration.status)
62 | time.sleep(1)
63 |
64 |     # The iteration is now trained. Name and publish this iteration to a prediction endpoint
65 |     trainer.publish_iteration(project.id, iteration.id, PUBLISH_ITERATION_NAME, prediction_resource_id)
66 |     print("Done!")
67 |
68 | return project
69 |
70 |
71 | if __name__ == "__main__":
72 | import sys
73 | import os.path
74 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..")))
75 | from tools import execute_samples
76 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
77 |
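78 |
79 | # A sketch of the alternative classifier type: since every image above gets two
80 | # tags (species plus leaf shape), a multilabel project may fit better. This
81 | # assumes Classifier.multilabel exists alongside Classifier.multiclass in your
82 | # SDK version; verify before relying on it.
83 | # project = trainer.create_project(
84 | #     SAMPLE_PROJECT_NAME, classification_type=Classifier.multilabel)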
--------------------------------------------------------------------------------
/samples/vision/custom_vision_training_samples.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
5 |
6 | SUBSCRIPTION_KEY_ENV_NAME = "CUSTOMVISION_TRAINING_KEY"
7 | PREDICTION_RESOURCE_ID_KEY_ENV_NAME = "CUSTOMVISION_PREDICTION_ID"
8 |
9 | SAMPLE_PROJECT_NAME = "Python SDK Sample"
10 | PUBLISH_ITERATION_NAME = "classifyModel"
11 |
12 | # Add your Custom Vision endpoint to your environment variables.
13 | ENDPOINT = os.environ["CUSTOM_VISION_ENDPOINT"]
14 |
15 | IMAGES_FOLDER = os.path.join(os.path.dirname(
16 | os.path.realpath(__file__)), "images")
17 |
18 |
19 | class PredictionResourceMissingError(Exception):
20 | pass
21 |
22 | def train_project(subscription_key):
23 | try:
24 | prediction_resource_id = os.environ[PREDICTION_RESOURCE_ID_KEY_ENV_NAME]
25 | except KeyError:
26 | raise PredictionResourceMissingError("Didn't find a prediction resource to publish to. Please set the {} environment variable".format(PREDICTION_RESOURCE_ID_KEY_ENV_NAME))
27 |
28 | trainer = CustomVisionTrainingClient(subscription_key, endpoint=ENDPOINT)
29 |
30 | # Create a new project
31 | print("Creating project...")
32 | project = trainer.create_project(SAMPLE_PROJECT_NAME)
33 |
34 | # Make two tags in the new project
35 | hemlock_tag = trainer.create_tag(project.id, "Hemlock")
36 | cherry_tag = trainer.create_tag(project.id, "Japanese Cherry")
37 |
38 | print("Adding images...")
39 | hemlock_dir = os.path.join(IMAGES_FOLDER, "Hemlock")
40 | for image in os.listdir(hemlock_dir):
41 | with open(os.path.join(hemlock_dir, image), mode="rb") as img_data:
42 | trainer.create_images_from_data(
43 | project.id, img_data.read(), [hemlock_tag.id])
44 |
45 | cherry_dir = os.path.join(IMAGES_FOLDER, "Japanese Cherry")
46 | for image in os.listdir(cherry_dir):
47 | with open(os.path.join(cherry_dir, image), mode="rb") as img_data:
48 | trainer.create_images_from_data(
49 | project.id, img_data.read(), [cherry_tag.id])
50 |
51 | print("Training...")
52 | iteration = trainer.train_project(project.id)
53 | while (iteration.status == "Training"):
54 | iteration = trainer.get_iteration(project.id, iteration.id)
55 | print("Training status: " + iteration.status)
56 | time.sleep(1)
57 |
58 |     # The iteration is now trained. Name and publish this iteration to a prediction endpoint
59 |     trainer.publish_iteration(project.id, iteration.id, PUBLISH_ITERATION_NAME, prediction_resource_id)
60 |     print("Done!")
61 |
62 | return project
63 |
64 |
65 | if __name__ == "__main__":
66 | import sys, os.path
67 | sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
68 | from samples.tools import execute_samples
69 | execute_samples(globals(), SUBSCRIPTION_KEY_ENV_NAME)
70 |
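71 |
72 | # A minimal sketch (not called above) of the training poll loop with a timeout,
73 | # reusing the same get_iteration() call and "Training" status string as
74 | # train_project() above.
75 | def _wait_for_training(trainer, project_id, iteration, timeout_seconds=600):
76 |     deadline = time.time() + timeout_seconds
77 |     while iteration.status == "Training":
78 |         if time.time() > deadline:
79 |             raise TimeoutError("Training did not finish in time.")
80 |         time.sleep(1)
81 |         iteration = trainer.get_iteration(project_id, iteration.id)
82 |     return iteration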
--------------------------------------------------------------------------------
/samples/vision/face_person_group_samples.py:
--------------------------------------------------------------------------------
1 | import os, io, uuid, glob, time
2 | from msrest.authentication import CognitiveServicesCredentials
3 | from azure.cognitiveservices.vision.face import FaceClient
4 | from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person
5 |
6 | '''
7 | PersonGroup - Face API sample
8 | References:
9 | How-to guide: https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/howtoidentifyfacesinimage
10 | SDK: https://docs.microsoft.com/en-us/python/api/azure-cognitiveservices-vision-face/azure.cognitiveservices.vision.face?view=azure-python
11 | Sample images to download: https://github.com/Microsoft/Cognitive-Face-Windows/tree/master/Data
12 | Prerequisites:
13 | Python 3+
14 | Install Face SDK: pip install azure-cognitiveservices-vision-face
15 | '''
16 | # Group image for testing against
17 | group_photo = 'test-image.jpg'
18 | # To add subdirectories, e.g. os.path.join(os.path.dirname(os.path.realpath(__file__)), "images-directory", "sub-directory")
19 | IMAGES_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)))
20 |
21 | '''
22 | Authentication
23 | '''
24 | # Replace with a valid subscription key (keeping the quotes in place).
25 | KEY = ''
26 | # Replace westus if it's not your region
27 | BASE_URL = 'https://westus.api.cognitive.microsoft.com'
28 | face_client = FaceClient(BASE_URL, CognitiveServicesCredentials(KEY))
29 |
30 | '''
31 | Create the PersonGroup
32 | '''
33 | # Create empty person group
34 | # person_group_id = str(uuid.uuid4()) # Uncomment to generate a random ID
35 | person_group_id = 'my-unique-person-group'
36 | print(person_group_id)
37 | face_client.person_group.create(person_group_id=person_group_id, name=person_group_id)
38 |
39 | # Define woman friend
40 | woman = face_client.person_group_person.create(person_group_id, "Woman")
41 | # Define man friend
42 | man = face_client.person_group_person.create(person_group_id, "Man")
43 | # Define child friend
44 | child = face_client.person_group_person.create(person_group_id, "Child")
45 |
46 | '''
47 | Detect faces and register to correct person
48 | '''
49 | # Find all jpeg images of friends in working directory
50 | woman_images = [file for file in glob.glob('*.jpg') if file.startswith("woman")]
51 | man_images = [file for file in glob.glob('*.jpg') if file.startswith("man")]
52 | child_images = [file for file in glob.glob('*.jpg') if file.startswith("child")]
53 |
54 | # Add images to the woman person
55 | for image in woman_images:
56 |     with open(image, 'rb') as w:
57 |         face_client.person_group_person.add_face_from_stream(person_group_id, woman.person_id, w)
58 |
59 | # Add images to the man person
60 | for image in man_images:
61 |     with open(image, 'rb') as m:
62 |         face_client.person_group_person.add_face_from_stream(person_group_id, man.person_id, m)
63 |
64 | # Add images to the child person
65 | for image in child_images:
66 |     with open(image, 'rb') as ch:
67 |         face_client.person_group_person.add_face_from_stream(person_group_id, child.person_id, ch)
68 |
69 | '''
70 | Train PersonGroup
71 | '''
72 | # Train the person group and poll until training completes
73 | face_client.person_group.train(person_group_id)
74 | training_status = face_client.person_group.get_training_status(person_group_id)
75 | while (training_status.status == TrainingStatusType.running):
76 |     print(training_status.status)
77 |     time.sleep(1)
78 |     # Refresh the status so the loop can terminate
79 |     training_status = face_client.person_group.get_training_status(person_group_id)
80 | if (training_status.status == TrainingStatusType.failed):
81 |     raise Exception('Training failed with message {}.'.format(training_status.message))
82 | print(training_status.status)
83 |
84 | '''
85 | Identify a face against a defined PersonGroup
86 | '''
87 | # Get test image
88 | test_image_array = glob.glob(os.path.join(IMAGES_FOLDER, group_photo))
89 | image = open(test_image_array[0], 'rb')
90 |
91 | # Detect faces
92 | face_ids = []
93 | faces = face_client.face.detect_with_stream(image)
94 | for face in faces:
95 | face_ids.append(face.face_id)
96 |
97 | # Identify faces
98 | results = face_client.face.identify(face_ids, person_group_id)
99 | if not results:
100 |     print('No person from the person group was identified for the faces in {}.'.format(os.path.basename(image.name)))
101 | for person in results:
102 |     print('Face ID {} was identified in {} with confidence {}.'.format(person.face_id, os.path.basename(image.name), person.candidates[0].confidence))  # top candidate's confidence
103 |
104 | # When finished testing, delete the PersonGroup from your resource; re-creating it later would otherwise fail because person group IDs must be unique.
105 | face_client.person_group.delete(person_group_id)
106 |
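107 | # Optional sketch: resolve an identify candidate back to the person's name.
108 | # It assumes the person group still exists, so call it before the delete above;
109 | # person_group_person.get returns the stored Person, including its name.
110 | def print_candidate_name(candidate):
111 |     person = face_client.person_group_person.get(person_group_id, candidate.person_id)
112 |     print('Identified as {} (confidence {}).'.format(person.name, candidate.confidence))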
--------------------------------------------------------------------------------
/samples/vision/images/Face/Family1-Dad1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/Family1-Dad1.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/Family1-Dad2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/Family1-Dad2.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/Family1-Son1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/Family1-Son1.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/child1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/child1.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/child2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/child2.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/child3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/child3.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/man1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/man1.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/man2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/man2.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/man3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/man3.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/test-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/test-image.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/woman1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/woman1.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/woman2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/woman2.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Face/woman3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Face/woman3.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_1.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_10.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_2.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_3.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_4.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_5.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_6.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_7.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_8.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Hemlock/hemlock_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Hemlock/hemlock_9.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_1.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_10.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_2.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_3.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_4.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_5.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_6.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_7.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_8.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Japanese Cherry/japanese_cherry_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Japanese Cherry/japanese_cherry_9.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Test/test_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Test/test_image.jpg
--------------------------------------------------------------------------------
/samples/vision/images/Test/test_od_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/Test/test_od_image.jpg
--------------------------------------------------------------------------------
/samples/vision/images/computer_vision_ocr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/computer_vision_ocr.png
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_1.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_10.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_11.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_12.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_13.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_14.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_15.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_15.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_16.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_17.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_17.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_18.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_18.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_19.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_19.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_2.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_20.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_20.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_3.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_4.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_5.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_6.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_7.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_8.jpg
--------------------------------------------------------------------------------
/samples/vision/images/fork/fork_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/fork/fork_9.jpg
--------------------------------------------------------------------------------
/samples/vision/images/house.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/house.jpg
--------------------------------------------------------------------------------
/samples/vision/images/make_things_happen.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/make_things_happen.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_1.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_10.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_11.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_12.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_13.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_14.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_15.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_15.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_16.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_17.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_17.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_18.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_18.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_19.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_19.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_2.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_20.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_20.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_3.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_4.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_5.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_6.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_7.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_8.jpg
--------------------------------------------------------------------------------
/samples/vision/images/scissors/scissors_9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/images/scissors/scissors_9.jpg
--------------------------------------------------------------------------------
/samples/vision/inkrecognizer_sample.py:
--------------------------------------------------------------------------------
1 | #
2 | import os
3 | try:
4 | from tkinter import *
5 | from tkinter import messagebox
6 | except ImportError:
7 | # python <= 2.7
8 | from Tkinter import *
9 | import tkMessageBox as messagebox
10 |
11 | from collections import namedtuple
12 | from azure.cognitiveservices.inkrecognizer import ApplicationKind, InkStrokeKind
13 | from azure.cognitiveservices.inkrecognizer import InkRecognizerClient
14 |
15 | import logging
16 | logging.basicConfig(level=logging.DEBUG)
17 | #
18 |
19 | # You can also use an Azure credential instance
20 | #
21 | INK_RECOGNIZER_URL = "https://api.cognitive.microsoft.com/inkrecognizer"
22 | KEY = os.environ['INK_RECOGNITION_SUBSCRIPTION_KEY'].strip()
23 |
24 | # The default locale is "en-US". Setting a different language for a stroke will override this value.
25 | LANGUAGE_RECOGNITION_LOCALE = "en-US"
26 |
27 | # The default ApplicationKind is "MIXED". Specify the kind of strokes being sent to the API with different ApplicationKind values.
28 | # For example, ApplicationKind.WRITING or ApplicationKind.DRAWING
29 |
30 | APPLICATION_KIND = ApplicationKind.MIXED
31 | #
32 |
33 | # Shows simple implementation of InkPoint and InkStroke
34 | #
35 | InkPoint = namedtuple("InkPoint", "x y")
36 |
37 | class InkStroke():
38 | def __init__(self,
39 | ink_stroke_id,
40 | ink_points,
41 | stroke_kind=InkStrokeKind.UNKNOWN,
42 | stroke_language=""):
43 | self.id = ink_stroke_id
44 | self.points = ink_points
45 | self.kind = stroke_kind
46 | self.language = stroke_language
47 | #
48 |
49 | # Wrapper for InkRecognizerClient that shows how to
50 | # (1) Convert stroke unit from pixel to mm
51 | # (2) Set language recognition locale
52 | # (3) Index a key word from the recognition results
53 | # (4) Set the application kind when the expected type of ink content is known
54 |
55 |
56 | #
57 | class InkClient:
58 | def __init__(self, url, key):
59 | self._client = InkRecognizerClient(
60 | url,
61 | key,
62 | application_kind=APPLICATION_KIND, # default arguments for each request.
63 | )
64 | def send_request(self, ink_stroke_list):
65 | self._root = None
66 | try:
67 | root = self._client.recognize_ink(
68 | ink_stroke_list, # Arguments for this request only.
69 | language=LANGUAGE_RECOGNITION_LOCALE,
70 | logging_enable=True
71 | )
72 |
73 | result_text = []
74 | for word in root.ink_words:
75 | result_text.append(word.recognized_text)
76 | for shape in root.ink_drawings:
77 | result_text.append(shape.recognized_shape.value)
78 | result_text = "\n".join(result_text)
79 |             self._root = root
80 |             return result_text
81 | except Exception as e:
82 | messagebox.showinfo("Error", e)
83 | #
84 |
85 | #
86 | class RecognitionManager:
87 | def __init__(self, pixel_per_mm):
88 | self._pixel_per_mm = pixel_per_mm
89 | self._client = InkClient(INK_RECOGNIZER_URL, KEY)
90 | self.reset_ink()
91 |
92 | def _reset_stroke(self):
93 | self._curr_stroke_points = []
94 |
95 | def _pixel_to_mm(self, pixel):
96 | return pixel * 1.0 / self._pixel_per_mm
97 |
98 | def reset_ink(self):
99 | self._ink_stroke_list = []
100 | self._root = None
101 | self._reset_stroke()
102 |
103 | # Convert from pixel to mm before adding to InkPoint.
104 | def add_point(self, x, y):
105 |
106 | self._curr_stroke_points.append(
107 | InkPoint(self._pixel_to_mm(x), self._pixel_to_mm(y)))
108 |
109 | def stroke_end(self):
110 | stroke = InkStroke(len(self._ink_stroke_list), self._curr_stroke_points)
111 | self._ink_stroke_list.append(stroke)
112 | self._reset_stroke()
113 |
114 | def recognize(self):
115 | result_text = self._client.send_request(self._ink_stroke_list)
116 | messagebox.showinfo("Result", result_text)
117 |
118 |     def search(self, word):
119 |         # Use the recognition tree saved by InkClient.send_request
120 |         root = self._client._root
121 |         num_words = len(root.find_word(word)) if root is not None else 0
122 |         search_result = "Found %d word%s" % (
123 |             num_words, "s" if num_words != 1 else "")
124 | messagebox.showinfo("Search Result", search_result)
125 | #
126 |
127 |
128 | #
129 | CANVAS_WIDTH = 800
130 | CANVAS_HEIGHT = 500
131 | STROKE_COLOR = "#476042" # python green
132 | STROKE_WIDTH = 3
133 |
134 | class InkRecognizerDemo:
135 | def __init__(self):
136 | self._master = Tk()
137 | self._pack_widgets()
138 |
139 | self._recognition_manager = RecognitionManager(
140 | pixel_per_mm=self._master.winfo_fpixels("1m"))
141 | # point for drawing stroke
142 | self._last_point = None
143 |
144 | def _pack_widgets(self):
145 | self._master.title("Ink Recognizer Demo")
146 | # search words
147 | self._search_variable = StringVar(value="")
148 | search_entry = Entry(self._master, textvariable=self._search_variable)
149 | search_button = Button(self._master, text="search a word", command=self._search)
150 | search_entry.pack(pady=5)
151 | search_button.pack()
152 | # main canvas
153 | self._canvas = Canvas(
154 | self._master,
155 | width=CANVAS_WIDTH,
156 | height=CANVAS_HEIGHT)
157 |         self._canvas.pack(expand=YES, fill=BOTH)
158 |         self._canvas.bind("<B1-Motion>", self._draw)
159 |         self._canvas.bind("<Button-1>", self._stroke_start)
160 |         self._canvas.bind("<ButtonRelease-1>", self._stroke_end)
161 | # recognize and clear buttons
162 | recognize_button = Button(
163 | self._master, text="Recognize", command=self._recognize)
164 | recognize_button.pack(pady=5)
165 | clear_button = Button(
166 | self._master, text="Clear", command=self._clear_canvas)
167 | clear_button.pack(pady=5)
168 |
169 | def _draw(self, event):
170 | # paint on canvas
171 | x_curr, y_curr = event.x, event.y
172 | if self._last_point is not None:
173 | x_last, y_last = self._last_point[0], self._last_point[1]
174 | self._canvas.create_line(
175 | x_last, y_last, x_curr, y_curr, fill=STROKE_COLOR, width=STROKE_WIDTH)
176 | self._last_point = x_curr, y_curr
177 | # add point to stroke store
178 | self._recognition_manager.add_point(x_curr, y_curr)
179 |
180 | def _stroke_start(self, event):
181 |         # nothing to do here
182 | pass
183 |
184 | def _stroke_end(self, event):
185 | self._recognition_manager.stroke_end()
186 | self._last_point = None
187 |
188 | def _clear_canvas(self):
189 | self._canvas.delete("all")
190 | self._recognition_manager.reset_ink()
191 |
192 | def _recognize(self):
193 | self._recognition_manager.recognize()
194 |
195 | def _search(self):
196 | self._recognition_manager.search(self._search_variable.get())
197 |
198 | def run(self):
199 | mainloop()
200 | #
201 |
202 | #
203 | if __name__ == "__main__":
204 | demo = InkRecognizerDemo()
205 | demo.run()
206 | #
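207 |
208 | # Headless usage sketch, assuming the same endpoint and KEY as above; it builds
209 | # one synthetic stroke (coordinates in mm) and sends it straight to the service
210 | # without the Tk UI. InkPoint and InkStroke are the classes defined earlier.
211 | def recognize_sample_stroke():
212 |     points = [InkPoint(x * 0.5, 5.0) for x in range(20)]  # a short horizontal line
213 |     stroke = InkStroke(0, points)
214 |     client = InkRecognizerClient(INK_RECOGNIZER_URL, KEY)
215 |     root = client.recognize_ink([stroke])
216 |     for drawing in root.ink_drawings:
217 |         print(drawing.recognized_shape.value)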
--------------------------------------------------------------------------------
/samples/vision/ocr-data-safety/OCR for data safety and content safety.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "ffdc8f83-db17-42b3-b630-9e739e224c69",
6 | "metadata": {},
7 | "source": [
8 | "# **OCR for Sensitive Data Protection in Images**\n",
9 | " \n",
10 | "This notebook shows an example of how Azure AI OCR can help detect and protect sensitive data embedded in images. Azure AI OCR extracts text from an image of a Social Security card; the extracted text is then passed to the Azure PII detection API, which detects and redacts the sensitive text. \n",
11 | "\n",
12 | "Consider using [Vision Studio](https://portal.vision.cognitive.azure.com/gallery/ocr) for a no-code try-out experience for OCR.\n",
13 | "\n",
14 | "## **Prerequisites**\n",
15 | "•\tAzure subscription - [Create one for free](https://azure.microsoft.com/en-us/free/ai-services/). \n",
16 | "•\tPython 3.7 or later \n",
17 | "•\tOnce you have your Azure subscription, create an [Azure AI Services Resource](https://ms.portal.azure.com/#view/Microsoft_Azure_Marketplace/GalleryItemDetailsBladeNopdl/id/Microsoft.CognitiveServicesAllInOne/selectionMode~/false/resourceGroupId//resourceGroupLocation//dontDiscardJourney~/false/selectedMenuId/home/launchingContext~/%7B%22galleryItemId%22%3A%22Microsoft.CognitiveServicesAllInOne%22%2C%22source%22%3A%5B%22GalleryFeaturedMenuItemPart%22%2C%22VirtualizedTileDetails%22%5D%2C%22menuItemId%22%3A%22home%22%2C%22subMenuItemId%22%3A%22Search%20results%22%2C%22telemetryId%22%3A%2283634c2f-d125-43ab-97bc-7a640bbe21b8%22%7D/searchTelemetryId/371171fe-5873-4a59-b146-a99c27091437) in the Azure portal to get your key and endpoint. After it deploys, select \"Go to resource\". You'll need the key and endpoint from the resource you create to connect your application to Azure APIs by pasting them into the code below. \n"
18 | ]
19 | },
20 | {
21 | "cell_type": "markdown",
22 | "id": "b5987dd9-57f5-435f-a766-7e08bfdd63bb",
23 | "metadata": {},
24 | "source": [
25 | "## Architectural Diagram"
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "id": "5cd49e5b-c583-4c04-9610-d5583610aaec",
31 | "metadata": {},
32 | "source": [
33 | ""
34 | ]
35 | },
36 | {
37 | "cell_type": "markdown",
38 | "id": "3d69679d-a04a-47f2-a644-564d6dcf9489",
39 | "metadata": {},
40 | "source": [
41 | "## Example Image"
42 | ]
43 | },
44 | {
45 | "cell_type": "markdown",
46 | "id": "b5a32e8c-38c5-49bb-8a10-e1bee8f6a2d2",
47 | "metadata": {},
48 | "source": [
49 | ""
50 | ]
51 | },
52 | {
53 | "cell_type": "markdown",
54 | "id": "faadf9ea-e890-485a-a1bf-b1c252a769cc",
55 | "metadata": {},
56 | "source": [
57 | "## Set up"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "id": "9781de39-d8b7-458d-8855-375996fedb74",
64 | "metadata": {},
65 | "outputs": [],
66 | "source": [
67 | "%pip install azure-ai-vision-imageanalysis"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": null,
73 | "id": "d4ee10cf-c8f5-4172-9c86-ca9bb3c3edc3",
74 | "metadata": {},
75 | "outputs": [],
76 | "source": [
77 | "%pip install azure-ai-textanalytics==5.2.0"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "id": "a37fa7a9-3a91-4eb7-ae83-336f174a01c1",
83 | "metadata": {},
84 | "source": [
85 | "## Example Code"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "id": "84ceb68a-bbe6-4259-a3fa-7e2b199b8420",
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "import os\n",
96 | "from azure.ai.vision.imageanalysis import ImageAnalysisClient\n",
97 | "from azure.ai.vision.imageanalysis.models import VisualFeatures\n",
98 | "from azure.core.credentials import AzureKeyCredential\n",
99 | "from azure.ai.textanalytics import TextAnalyticsClient\n",
100 | "from azure.core.exceptions import HttpResponseError\n",
101 | "\n",
102 | "# Paste the values of your Azure AI services endpoint and key below\n",
103 | "# (find them on the resource's \"Keys and Endpoint\" page in the Azure portal):\n",
104 | "endpoint = \"\"  # Paste your AI services endpoint here\n",
105 | "key = \"\"  # Paste your AI services resource key here\n",
106 | "\n",
107 | "if not endpoint or not key:\n",
108 | "    print(\"Missing 'endpoint' or 'key'.\")\n",
109 | "    print(\"Set them before running this sample.\")\n",
110 | "    exit()\n",
111 | "\n",
112 | "# Create an Image Analysis client\n",
113 | "image_analysis_client = ImageAnalysisClient(\n",
114 | " endpoint=endpoint,\n",
115 | " credential=AzureKeyCredential(key)\n",
116 | ")\n",
117 | "\n",
118 | "#Create an Azure Text Analytics client\n",
119 | "text_analytics_client = TextAnalyticsClient(\n",
120 | " endpoint=endpoint, \n",
121 | " credential=AzureKeyCredential(key)\n",
122 | ")\n",
123 | "\n",
124 | "\n",
125 | "# Example method for detecting sensitive information (PII) from text in images \n",
126 | "def pii_recognition_example(client):\n",
127 | "\n",
128 | " #Get text from the image using Image Analysis OCR\n",
129 | " ocr_result = image_analysis_client.analyze_from_url(\n",
130 | " image_url=\"https://resources.ssnsimple.com/wp-content/uploads/2019/11/social-security-number.jpg\",\n",
131 | " visual_features=[VisualFeatures.READ],\n",
132 | "    )\n",
133 | " \n",
134 | " documents = [' '.join([line['text'] for line in ocr_result.read.blocks[0].lines])]\n",
135 | " \n",
136 | " print(documents)\n",
137 | "\n",
138 | " #Detect sensitive information in OCR output\n",
139 | "    response = client.recognize_pii_entities(documents, language=\"en\")\n",
140 | " result = [doc for doc in response if not doc.is_error]\n",
141 | " \n",
142 | " for doc in result:\n",
143 | " print(\"Redacted Text: {}\".format(doc.redacted_text))\n",
144 | " for entity in doc.entities:\n",
145 | " print(\"Entity: {}\".format(entity.text))\n",
146 | " print(\"\\tCategory: {}\".format(entity.category))\n",
147 | " print(\"\\tConfidence Score: {}\".format(entity.confidence_score))\n",
148 | " print(\"\\tOffset: {}\".format(entity.offset))\n",
149 | " print(\"\\tLength: {}\".format(entity.length))\n",
150 | " \n",
151 | "pii_recognition_example(text_analytics_client) \n"
152 | ]
153 | },
154 | {
155 | "cell_type": "markdown",
156 | "id": "addd1076-efa2-461a-8dba-86b1dead092f",
157 | "metadata": {},
158 | "source": [
159 | "## **Example Use Cases**\n",
160 | "**Data Compliance:** OCR enables automatic detection and redaction of personally identifiable information (PII), financial data, or confidential business information, ensuring compliance with data privacy regulations like GDPR, HIPAA, or PCI-DSS.\n",
161 | "\n",
162 | "**Sensitive Data Detection in Customer Communications:** Emails, forms, and messages may contain sensitive information. OCR-powered systems scan these communications to identify and classify sensitive data, enabling proactive measures to secure and protect customer information from unauthorized access or misuse. Microsoft Purview Communication Compliance, for example, uses Azure AI OCR to protect sensitive data embedded in emails and Teams messages. \n",
163 | "\n",
164 | "**Secure Information Sharing and Collaboration:** Collaboration with external partners, suppliers, or clients necessitates the sharing of sensitive documents and images. OCR facilitates secure information sharing by automatically encrypting, redacting, or watermarking sensitive content within shared documents, ensuring data confidentiality and integrity throughout the collaboration process.\n",
165 | "\n",
166 | "**Healthcare Records Management and Compliance:** In the healthcare industry, OCR plays a vital role in managing electronic health records (EHRs) and ensuring compliance with healthcare regulations like HIPAA. OCR technology extracts and analyzes text from medical documents, prescriptions, and patient records, enabling efficient search, indexing, and secure storage of sensitive health information while maintaining patient privacy and confidentiality."
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "id": "1ef0820e-59c3-4e4d-8c53-545caabd09ee",
172 | "metadata": {},
173 | "source": [
174 | "## **Next Steps**\n",
175 | "\n",
176 | "**Explore OCR features in Vision and Doc Intelligence Studios:** Try out Azure AI OCR offerings without writing any code on [Vision Studio](https://portal.vision.cognitive.azure.com/gallery/ocr) for image OCR and [Doc Intelligence Studio](https://documentintelligence.ai.azure.com/studio/) for document OCR. \n",
177 | "\n",
178 | "**Public documentation:** Read and review [public documentation](https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/overview-ocr) to start integrating Azure AI OCR into your applications. "
179 | ]
180 | }
181 | ],
182 | "metadata": {
183 | "kernelspec": {
184 | "display_name": "Python 3 (ipykernel)",
185 | "language": "python",
186 | "name": "python3"
187 | },
188 | "language_info": {
189 | "codemirror_mode": {
190 | "name": "ipython",
191 | "version": 3
192 | },
193 | "file_extension": ".py",
194 | "mimetype": "text/x-python",
195 | "name": "python",
196 | "nbconvert_exporter": "python",
197 | "pygments_lexer": "ipython3",
198 | "version": "3.11.2"
199 | }
200 | },
201 | "nbformat": 4,
202 | "nbformat_minor": 5
203 | }
204 |
--------------------------------------------------------------------------------
/samples/vision/ocr-data-safety/diagram.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/ocr-data-safety/diagram.jpg
--------------------------------------------------------------------------------
/samples/vision/ocr-data-safety/example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-python-sdk-samples/ad8f1e7e19c6e2c7b1d7bf3afa3ce1b5214c59c7/samples/vision/ocr-data-safety/example.png
--------------------------------------------------------------------------------
/samples/vision/text_files/content_moderator_term_list.txt:
--------------------------------------------------------------------------------
1 | This text contains the terms "term1" and "term2".
--------------------------------------------------------------------------------
/samples/vision/text_files/content_moderator_text_moderation.txt:
--------------------------------------------------------------------------------
1 | Is this a grabage email abcdef@abcd.com, phone: 6657789887, IP: 255.255.255.255, 1 Microsoft Way, Redmond, WA 98052.
2 | Crap is the profanity here. Is this information PII? phone 3144444444
--------------------------------------------------------------------------------
/samples/vision/text_files/content_moderator_video_transcript.txt:
--------------------------------------------------------------------------------
1 | WEBVTT
2 |
3 | 01:01.000 --> 02:02.000
4 | First line with a crap word in a transcript.
5 |
6 | 02:03.000 --> 02:25.000
7 | This is another line in the transcript.
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
1 | # Tests folder
2 |
3 | This folder contains the automatic testing system, which makes sure these samples stay
4 | up to date with the latest release of the packages they use.
5 |
6 | It is not part of the samples themselves; it is just a generic test launcher.
--------------------------------------------------------------------------------
/tests/test-requirements.txt:
--------------------------------------------------------------------------------
1 | -r ../requirements.txt
2 |
3 | azure-devtools
4 | mock;python_version<="2.7"
5 | nose
6 | six
--------------------------------------------------------------------------------
/tests/test_example.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | import unittest
3 |
4 | try:
5 | from unittest import mock
6 | except ImportError:
7 | import mock
8 |
9 | from azure_devtools.scenario_tests import (
10 | ReplayableTest,
11 | )
12 |
13 | from example import run_all_samples
14 |
15 |
16 | TEST_CONFIG = os.path.join(os.path.dirname(__file__), 'testsettings.cfg')
17 |
18 |
19 | class SearchExampleTest(ReplayableTest):
20 | """Simple test launcher for the sample.
21 |
22 |     Be sure to set the required environment variables before starting it live.
23 | """
24 | FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + \
25 | ['Ocp-Apim-Subscription-Key']
26 |
27 | def test_example(self):
28 | if self.in_recording:
29 | run_all_samples()
30 | else:
31 | with mock.patch.dict('os.environ', {
32 | "ENTITYSEARCH_SUBSCRIPTION_KEY": "0000000000000000000000000000",
33 | "WEBSEARCH_SUBSCRIPTION_KEY": "0000000000000000000000000000",
34 | "SPELLCHECK_SUBSCRIPTION_KEY": "0000000000000000000000000000",
35 | "CUSTOMSEARCH_SUBSCRIPTION_KEY": "0000000000000000000000000000",
36 | "IMAGESEARCH_SUBSCRIPTION_KEY": "0000000000000000000000000000",
37 | "NEWSSEARCH_SUBSCRIPTION_KEY": "0000000000000000000000000000",
38 |                 "CUSTOMVISION_TRAINING_KEY": "0000000000000000000000000000",
39 |                 "CUSTOMVISION_PREDICTION_KEY": "0000000000000000000000000000",
40 |             }):
41 |                 run_all_samples()
42 |
43 |
44 | if __name__ == '__main__':
45 |     unittest.main()
46 |
--------------------------------------------------------------------------------
/tests/testsettings.cfg:
--------------------------------------------------------------------------------
1 | live-mode: false
2 |
--------------------------------------------------------------------------------