├── samples ├── python │ ├── tox.ini │ ├── tensorflow │ │ ├── README.md │ │ ├── classification │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── classification_s1 │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── object_detection │ │ │ ├── README.md │ │ │ └── predict.py │ │ └── object_detection_s1 │ │ │ ├── README.md │ │ │ └── predict.py │ ├── tensorflow_saved_model │ │ ├── README.md │ │ ├── classification │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── classification_s1 │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── object_detection │ │ │ ├── README.md │ │ │ └── predict.py │ │ └── object_detection_s1 │ │ │ ├── README.md │ │ │ └── predict.py │ ├── onnx │ │ ├── classification │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── object_detection │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── object_detection_s1 │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── classification_s1 │ │ │ ├── README.md │ │ │ └── predict.py │ │ └── README.md │ ├── openvino │ │ ├── object_detection_no_postprocess │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── object_detection_no_postprocess_s1 │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── object_detection │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── classification │ │ │ ├── README.md │ │ │ └── predict.py │ │ └── README.md │ ├── coreml │ │ ├── classification │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── object_detection │ │ │ ├── README.md │ │ │ └── predict.py │ │ ├── object_detection_s1 │ │ │ ├── README.md │ │ │ └── predict.py │ │ └── README.md │ └── tensorflow_lite │ │ ├── classification │ │ ├── README.md │ │ └── predict.py │ │ ├── object_detection │ │ ├── README.md │ │ └── predict.py │ │ ├── classification_s1 │ │ ├── README.md │ │ └── predict.py │ │ ├── object_detection_s1 │ │ ├── README.md │ │ └── predict.py │ │ └── README.md ├── javascript │ └── tensorflowjs │ │ ├── classification_nodejs │ │ ├── package.json │ │ ├── index.js │ │ └── README.md │ │ ├── objectdetection_nodejs │ │ ├── package.json │ │ ├── index.js │ │ └── README.md │ │ ├── classification │ │ ├── README.md │ │ └── index.html │ │ ├── objectdetection │ │ ├── README.md │ │ └── index.html │ │ └── README.md └── csharp │ ├── onnx │ ├── classification │ │ ├── classification.csproj │ │ ├── README.md │ │ └── Main.cs │ ├── object_detection_s1 │ │ ├── object_detection_s1.csproj │ │ ├── README.md │ │ └── Main.cs │ └── README.md │ └── mlnet │ ├── classification_s1 │ ├── Classification.csproj │ ├── README.md │ └── Program.cs │ ├── object_detection_s1 │ ├── ObjectDetection.csproj │ ├── README.md │ └── Program.cs │ └── README.md ├── .github ├── CODE_OF_CONDUCT.md ├── PULL_REQUEST_TEMPLATE.md └── ISSUE_TEMPLATE.md ├── LICENSE.md ├── README.md ├── CONTRIBUTING.md └── .gitignore /samples/python/tox.ini: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 200 3 | exclude = build -------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/classification_nodejs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "customvision-classification-sample", 3 | "version": "1.0.0", 4 | "description": "Sample scripts for classification", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "MIT", 11 | "dependencies": { 12 | "@microsoft/customvision-tfjs-node": "^1.2.0", 13 | "yargs": "^15.4.1" 14 | } 15 | } 16 | 
-------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/objectdetection_nodejs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "customvision-objectdetection-sample", 3 | "version": "1.0.0", 4 | "description": "Sample scripts for Object Detection", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "MIT", 11 | "dependencies": { 12 | "@microsoft/customvision-tfjs-node": "^1.2.0", 13 | "yargs": "^15.4.1" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/classification_nodejs/index.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs') 2 | const argv = require('yargs').usage('Usage: $0 <image_filepath>').demandCommand(1).argv; 3 | const cvstfjs = require('@microsoft/customvision-tfjs-node') 4 | 5 | async function run(image_filepath) { 6 | const model = new cvstfjs.ClassificationModel(); 7 | await model.loadModelAsync('file://model.json') 8 | 9 | fs.readFile(image_filepath, async function (err, data) { 10 | if (err) { 11 | throw err; 12 | } 13 | 14 | const result = await model.executeAsync(data); 15 | console.log(result); 16 | }); 17 | } 18 | 19 | run(argv._[0]) 20 | -------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/objectdetection_nodejs/index.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs') 2 | const argv = require('yargs').usage('Usage: $0 <image_filepath>').demandCommand(1).argv; 3 | const cvstfjs = require('@microsoft/customvision-tfjs-node') 4 | 5 | async function run(image_filepath) { 6 | const model = new cvstfjs.ObjectDetectionModel(); 7 | await model.loadModelAsync('file://model.json') 8 | 9 | fs.readFile(image_filepath, async function (err, data) { 10 | if (err) { 11 | throw err; 12 | } 13 | 14 | const result = await model.executeAsync(data); 15 | console.log(result); 16 | }); 17 | } 18 | 19 | run(argv._[0]) 20 | -------------------------------------------------------------------------------- /samples/csharp/onnx/classification/classification.csproj: -------------------------------------------------------------------------------- 1 | <Project Sdk="Microsoft.NET.Sdk"> 2 | 3 | <PropertyGroup> 4 | <OutputType>Exe</OutputType> 5 | <TargetFramework>net6.0</TargetFramework> 6 | <ImplicitUsings>enable</ImplicitUsings> 7 | <Nullable>enable</Nullable> 8 | </PropertyGroup> 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | </Project> -------------------------------------------------------------------------------- /samples/csharp/onnx/object_detection_s1/object_detection_s1.csproj: -------------------------------------------------------------------------------- 1 | <Project Sdk="Microsoft.NET.Sdk"> 2 | 3 | <PropertyGroup> 4 | <OutputType>Exe</OutputType> 5 | <TargetFramework>net6.0</TargetFramework> 6 | <ImplicitUsings>enable</ImplicitUsings> 7 | <Nullable>enable</Nullable> 8 | </PropertyGroup> 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | </Project>
-------------------------------------------------------------------------------- /samples/python/tensorflow/README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for Custom Vision's TensorFlow FrozenGraph models 2 | 3 | | Task | Domain | Export Platform | Export Flavor | Link | 4 | |------|--------|-----------------|---------------|------| 5 | | Classification | General (compact) [S1] | TensorFlow | null | [README](classification_s1) | 6 | | Classification | General (compact) | TensorFlow | null | [README](classification) | 7 | | Object Detection | General (compact) [S1] | TensorFlow | null | [README](object_detection_s1) | 8 | | Object Detection | General (compact) | TensorFlow | null | [README](object_detection) | 9 | 10 | ## Prerequisites 11 | - Python 3.7+ 12 | 13 | ## Setup 14 | ```bash 15 | pip install tensorflow Pillow 16 | ``` 17 | -------------------------------------------------------------------------------- /samples/csharp/mlnet/classification_s1/Classification.csproj: -------------------------------------------------------------------------------- 1 | <Project Sdk="Microsoft.NET.Sdk"> 2 | 3 | <PropertyGroup> 4 | <OutputType>Exe</OutputType> 5 | <TargetFramework>net6.0</TargetFramework> 6 | <ImplicitUsings>enable</ImplicitUsings> 7 | <Nullable>enable</Nullable> 8 | </PropertyGroup> 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | </Project> -------------------------------------------------------------------------------- /samples/csharp/mlnet/object_detection_s1/ObjectDetection.csproj: -------------------------------------------------------------------------------- 1 | <Project Sdk="Microsoft.NET.Sdk"> 2 | 3 | <PropertyGroup> 4 | <OutputType>Exe</OutputType> 5 | <TargetFramework>net6.0</TargetFramework> 6 | <ImplicitUsings>enable</ImplicitUsings> 7 | <Nullable>enable</Nullable> 8 | </PropertyGroup> 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | </Project> -------------------------------------------------------------------------------- /samples/csharp/mlnet/README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for Custom Vision's ONNX models (ML.NET) 2 | 3 | | Task | Domain | Export Platform | Export Flavor | Link | 4 | |------|--------|-----------------|---------------|------| 5 | | Classification | General (compact) [S1] | ONNX | null | [README](classification_s1) | 6 | | Object Detection | General (compact) [S1] | ONNX | null | [README](object_detection_s1) | 7 | 8 | ## Description 9 | 10 | These samples use Custom Vision ONNX models for classification and object detection tasks in an ML.NET pipeline. 11 | 12 | For more information on ML.NET, see [What is ML.NET?](https://learn.microsoft.com/dotnet/machine-learning/how-does-mldotnet-work).
13 | 14 | ## Prerequisites 15 | 16 | - [.NET 6 SDK](https://dotnet.microsoft.com/en-us/download/dotnet/6.0) 17 | -------------------------------------------------------------------------------- /samples/python/tensorflow_saved_model/README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for Custom Vision's TensorFlow SavedModel models 2 | 3 | | Task | Domain | Export Platform | Export Flavor | Link | 4 | |------|--------|-----------------|---------------|------| 5 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowSavedModel | [README](classification_s1) | 6 | | Classification | General (compact) | TensorFlow | TensorFlowSavedModel | [README](classification) | 7 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowSavedModel | [README](object_detection_s1) | 8 | | Object Detection | General (compact) | TensorFlow | TensorFlowSavedModel | [README](object_detection) | 9 | 10 | ## Prerequisites 11 | - Python 3.7+ 12 | 13 | ## Setup 14 | ```bash 15 | pip install tensorflow Pillow 16 | ``` 17 | -------------------------------------------------------------------------------- /samples/python/tensorflow/classification/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow FrozenGraph Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) | TensorFlow | null | 6 | 7 | 8 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 9 | 10 | ## Set up 11 | - Follow the [README](../README.md) to set up the TensorFlow environment. 12 | 13 | ## How to use 14 | ``` 15 | python predict.py <model_filepath> <image_filepath> 16 | ``` 17 | 18 | ## Notes 19 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 20 | -------------------------------------------------------------------------------- /samples/python/tensorflow/classification_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow FrozenGraph Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) [S1] | TensorFlow | null | 6 | 7 | 8 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 9 | 10 | ## Set up 11 | - Follow the [README](../README.md) to set up the TensorFlow environment. 12 | 13 | ## How to use 14 | ``` 15 | python predict.py <model_filepath> <image_filepath> 16 | ``` 17 | 18 | ## Notes 19 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API.
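As a quick reference, the core inference flow for a frozen-graph export looks roughly like the sketch below. The tensor names `Placeholder:0` and `model_outputs:0`, and the RGB-to-BGR channel swap, are assumptions based on typical Custom Vision TensorFlow exports; inspect your exported graph if they differ.

```python
import numpy as np
import PIL.Image
import tensorflow as tf

# Assumed tensor names for a Custom Vision frozen graph; verify against your export.
INPUT_TENSOR = 'Placeholder:0'
OUTPUT_TENSOR = 'model_outputs:0'


def predict(pb_filepath, image_filepath):
    # Parse the frozen GraphDef and import it into a fresh graph.
    graph_def = tf.compat.v1.GraphDef()
    with open(pb_filepath, 'rb') as f:
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')

    # Resize to the network input size, then reorder channels RGB -> BGR.
    input_tensor = graph.get_tensor_by_name(INPUT_TENSOR)
    height, width = int(input_tensor.shape[1]), int(input_tensor.shape[2])
    image = PIL.Image.open(image_filepath).resize((width, height))
    input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, (2, 1, 0)]

    with tf.compat.v1.Session(graph=graph) as sess:
        return sess.run(OUTPUT_TENSOR, feed_dict={INPUT_TENSOR: input_array})
```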
20 | -------------------------------------------------------------------------------- /samples/python/tensorflow/object_detection/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow FrozenGraph Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) | TensorFlow | null | 6 | 7 | 8 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 9 | 10 | ## Set up 11 | - Follow the [README](../README.md) to set up the TensorFlow environment. 12 | 13 | ## How to use 14 | ``` 15 | python predict.py <model_filepath> <image_filepath> 16 | ``` 17 | 18 | ## Notes 19 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 20 | -------------------------------------------------------------------------------- /samples/python/tensorflow/object_detection_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow FrozenGraph Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) [S1] | TensorFlow | null | 6 | 7 | 8 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 9 | 10 | ## Set up 11 | - Follow the [README](../README.md) to set up the TensorFlow environment. 12 | 13 | ## How to use 14 | ``` 15 | python predict.py <model_filepath> <image_filepath> 16 | ``` 17 | 18 | ## Notes 19 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 20 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Purpose 2 | 3 | * ... 4 | 5 | ## Pull Request Type 6 | What kind of change does this Pull Request introduce? 7 | 8 | 9 | ``` 10 | [ ] Bugfix 11 | [ ] Feature 12 | [ ] Code style update (formatting, local variables) 13 | [ ] Refactoring (no functional changes, no api changes) 14 | [ ] Documentation content changes 15 | [ ] Other... Please describe: 16 | ``` 17 | 18 | ## How to Test 19 | 20 | ``` 21 | ``` 22 | 23 | ## What to Check 24 | Verify that the following are valid: 25 | * ... 26 | 27 | ## Other Information 28 | -------------------------------------------------------------------------------- /samples/python/onnx/classification/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's ONNX Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) | ONNX | null | 6 | | Classification | General (compact) | ONNX | OnnxFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the ONNX environment.
13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. -------------------------------------------------------------------------------- /samples/python/openvino/object_detection_no_postprocess/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's OpenVino NoPostProcess Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) | OpenVino | NoPostProcess | 6 | 7 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 8 | 9 | ## Setup 10 | - Follow the [README](../README.md) to set up the OpenVino environment. 11 | 12 | ## Usage 13 | ``` 14 | python predict.py <model_filepath> <image_filepath> 15 | ``` 16 | 17 | ## Notes 18 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 19 | -------------------------------------------------------------------------------- /samples/python/tensorflow_saved_model/classification/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow SavedModel Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) | TensorFlow | TensorFlowSavedModel | 6 | 7 | 8 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 9 | 10 | ## Set up 11 | - Follow the [README](../README.md) to set up the TensorFlow environment. 12 | 13 | ## How to use 14 | ``` 15 | python predict.py <model_dirpath> <image_filepath> 16 | ``` 17 | 18 | ## Notes 19 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 20 | -------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/classification/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow.js classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | | ---- | ------ | --------------- | ------------- | 5 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowJs | 6 | | Classification | General (compact) | TensorFlow | TensorFlowJs | 7 | 8 | This script loads the customvision-tfjs library from CDN. 9 | 10 | ## Setup 11 | 1. Extract the exported model into this directory 12 | ```bash 13 | unzip <exported_model.zip> 14 | ``` 15 | 16 | 2. Serve this directory over HTTP. 17 | 18 | ```bash 19 | # If you have python, 20 | python -m http.server 8080 21 | 22 | # If you have Node.js, 23 | npx http-server -p 8080 24 | ``` 25 | 26 | ## Usage 27 | 1. Open http://localhost:8080/ with your favorite browser. 28 | 2. Choose a test image.
29 | -------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/objectdetection/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow.js Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | | ---- | ------ | --------------- | ------------- | 5 | | Object Detection | General (compact) | TensorFlow | TensorFlowJs | 6 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowJs | 7 | 8 | This script loads the customvision-tfjs library from CDN. 9 | 10 | ## Setup 11 | 1. Extract the exported model into this directory 12 | ```bash 13 | unzip <exported_model.zip> 14 | ``` 15 | 16 | 2. Serve this directory over HTTP. 17 | 18 | ```bash 19 | # If you have python, 20 | python -m http.server 8080 21 | 22 | # If you have Node.js, 23 | npx http-server -p 8080 24 | ``` 25 | 26 | ## Usage 27 | 1. Open http://localhost:8080/ with your favorite browser. 28 | 2. Choose a test image. 29 | -------------------------------------------------------------------------------- /samples/python/onnx/object_detection/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's ONNX Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) | ONNX | null | 6 | | Object Detection | General (compact) | ONNX | OnnxFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the ONNX environment. 13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 21 | -------------------------------------------------------------------------------- /samples/python/openvino/object_detection_no_postprocess_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's OpenVino NoPostProcess Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) [S1] | OpenVino | NoPostProcess | 6 | 7 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 8 | 9 | ## Setup 10 | - Follow the [README](../README.md) to set up the OpenVino environment. 11 | 12 | ## Usage 13 | ``` 14 | python predict.py <model_filepath> <image_filepath> 15 | ``` 16 | 17 | ## Notes 18 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API.
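As a quick reference, the sketch below shows one way to run an exported model with the OpenVINO 2021 Inference Engine API, matching the `openvino~=2021.3.0` package pinned in the parent README's setup. The NCHW input layout and the `CPU` device are assumptions; adjust them for your model and hardware.

```python
import numpy as np
import PIL.Image
from openvino.inference_engine import IECore


def predict(xml_filepath, bin_filepath, image_filepath):
    ie = IECore()
    network = ie.read_network(model=xml_filepath, weights=bin_filepath)
    executable = ie.load_network(network=network, device_name='CPU')

    input_name = next(iter(network.input_info))
    # Assumed NCHW input layout: (batch, channels, height, width).
    _, _, height, width = network.input_info[input_name].input_data.shape

    image = PIL.Image.open(image_filepath).resize((width, height))
    input_array = np.array(image, dtype=np.float32).transpose((2, 0, 1))[np.newaxis]

    return executable.infer(inputs={input_name: input_array})
```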
19 | -------------------------------------------------------------------------------- /samples/python/tensorflow_saved_model/classification_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow SavedModel Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowSavedModel | 6 | 7 | 8 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 9 | 10 | ## Set up 11 | - Follow the [README](../README.md) to set up the TensorFlow environment. 12 | 13 | ## How to use 14 | ``` 15 | python predict.py <model_dirpath> <image_filepath> 16 | ``` 17 | 18 | ## Notes 19 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 20 | -------------------------------------------------------------------------------- /samples/python/tensorflow_saved_model/object_detection/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow SavedModel Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) | TensorFlow | TensorFlowSavedModel | 6 | 7 | 8 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 9 | 10 | ## Set up 11 | - Follow the [README](../README.md) to set up the TensorFlow environment. 12 | 13 | ## How to use 14 | ``` 15 | python predict.py <model_dirpath> <image_filepath> 16 | ``` 17 | 18 | ## Notes 19 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 20 | -------------------------------------------------------------------------------- /samples/python/coreml/classification/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's CoreML Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) | CoreML | null | 6 | | Classification | General (compact) | CoreML | CoreMLFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the CoreML environment. 13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API.
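The accompanying predict.py simply prints the raw dictionary returned by `model.predict()`. If you want ranked labels instead, a small helper along these lines works, assuming the exported classifier exposes a label-to-probability dictionary as one of its outputs (the usual layout for Core ML classifier models):

```python
def top_predictions(outputs, k=5):
    """Return the k highest-probability (label, score) pairs from model.predict() output."""
    # Pick the output that holds the label -> probability mapping.
    probabilities = next(v for v in outputs.values() if isinstance(v, dict))
    return sorted(probabilities.items(), key=lambda item: item[1], reverse=True)[:k]
```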
21 | -------------------------------------------------------------------------------- /samples/python/tensorflow_saved_model/object_detection_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow SavedModel Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowSavedModel | 6 | 7 | 8 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 9 | 10 | ## Set up 11 | - Follow the [README](../README.md) to set up the TensorFlow environment. 12 | 13 | ## How to use 14 | ``` 15 | python predict.py <model_dirpath> <image_filepath> 16 | ``` 17 | 18 | ## Notes 19 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 20 | -------------------------------------------------------------------------------- /samples/csharp/onnx/object_detection_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's ONNX Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) [S1] | ONNX | null | 6 | | Object Detection | General (compact) [S1] | ONNX | OnnxFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the C# ONNX environment. 13 | 14 | ## How to use 15 | ``` 16 | dotnet run <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 21 | -------------------------------------------------------------------------------- /samples/python/onnx/object_detection_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's ONNX Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) [S1] | ONNX | null | 6 | | Object Detection | General (compact) [S1] | ONNX | OnnxFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the ONNX environment. 13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API.
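As a quick reference, the core of the ONNX inference flow looks roughly like the sketch below. The NCHW input layout and the BGR channel order are assumptions about typical Custom Vision ONNX exports, and for the OnnxFloat16 flavor the input would likely need to be `np.float16`; adjust to match your model.

```python
import numpy as np
import onnxruntime
import PIL.Image


def predict(model_filepath, image_filepath):
    session = onnxruntime.InferenceSession(str(model_filepath))
    input_meta = session.get_inputs()[0]
    # Assumed NCHW input: (batch, channels, height, width).
    _, _, height, width = input_meta.shape

    image = PIL.Image.open(image_filepath).resize((width, height))
    input_array = np.array(image, dtype=np.float32)[:, :, (2, 1, 0)]  # RGB -> BGR
    input_array = input_array.transpose((2, 0, 1))[np.newaxis]

    # Passing None fetches every declared output (e.g. boxes, scores, labels).
    return session.run(None, {input_meta.name: input_array})
```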
-------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/classification_nodejs/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow.js classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | | ---- | ------ | --------------- | ------------- | 5 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowJs | 6 | | Classification | General (compact) | TensorFlow | TensorFlowJs | 7 | 8 | 9 | ## Setup 10 | ### Use Docker (Recommended) 11 | ```bash 12 | docker run -it --rm -v <path_to_this_directory>:/work node bash 13 | 14 | # Inside the container, 15 | cd /work 16 | npm install . 17 | ``` 18 | 19 | ### Install Node.js manually 20 | Please follow the [official Node.js documentation](https://nodejs.org/en/download/). 21 | 22 | 23 | ## Usage 24 | ```bash 25 | unzip <exported_model.zip> 26 | node . <image_filepath> 27 | ``` 28 | 29 | The prediction results are printed to stdout. 30 | -------------------------------------------------------------------------------- /samples/python/coreml/object_detection/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's CoreML Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) | CoreML | null | 6 | | Object Detection | General (compact) | CoreML | CoreMLFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the CoreML environment. 13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 21 | -------------------------------------------------------------------------------- /samples/python/onnx/classification_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's ONNX Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) [S1] | ONNX | null | 6 | | Classification | General (compact) [S1] | ONNX | OnnxFloat16 | 7 | 8 | 9 | 10 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 11 | 12 | ## Set up 13 | - Follow the [README](../README.md) to set up the ONNX environment. 14 | 15 | ## How to use 16 | ``` 17 | python predict.py <model_filepath> <image_filepath> 18 | ``` 19 | 20 | ## Notes 21 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API.
22 | -------------------------------------------------------------------------------- /samples/python/openvino/object_detection/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's OpenVino Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) | OpenVino | null | 6 | | Object Detection | General (compact) [S1] | OpenVino | null | 7 | 8 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 9 | 10 | ## Setup 11 | - Follow the [README](../README.md) to set up the OpenVino environment. 12 | 13 | ## Usage 14 | ``` 15 | python predict.py <model_filepath> <image_filepath> 16 | ``` 17 | 18 | ## Notes 19 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 20 | -------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/objectdetection_nodejs/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow.js object detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | | ---- | ------ | --------------- | ------------- | 5 | | Object Detection | General (compact) | TensorFlow | TensorFlowJs | 6 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowJs | 7 | 8 | 9 | ## Setup 10 | ### Use Docker (Recommended) 11 | ```bash 12 | docker run -it --rm -v <path_to_this_directory>:/work node bash 13 | 14 | # Inside the container, 15 | cd /work 16 | npm install . 17 | ``` 18 | 19 | ### Install Node.js manually 20 | Please follow the [official Node.js documentation](https://nodejs.org/en/download/). 21 | 22 | 23 | ## Usage 24 | ```bash 25 | unzip <exported_model.zip> 26 | node . <image_filepath> 27 | ``` 28 | 29 | The prediction results are printed to stdout. 30 | -------------------------------------------------------------------------------- /samples/python/coreml/object_detection_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's CoreML Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) [S1] | CoreML | null | 6 | | Object Detection | General (compact) [S1] | CoreML | CoreMLFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the CoreML environment. 13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API.
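The boxes that predict.py prints are in the model's normalized coordinate space; assuming `(x, y, width, height)` values in the 0-1 range with a top-left origin (which is how the sample's output handling treats them), a small helper can map a box back to pixel coordinates on the original image:

```python
def to_pixel_box(box, image_width, image_height):
    """Convert a normalized (x, y, w, h) box to pixel (left, top, right, bottom)."""
    x, y, w, h = box
    return (x * image_width, y * image_height,
            (x + w) * image_width, (y + h) * image_height)
```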
21 | -------------------------------------------------------------------------------- /samples/python/tensorflow_lite/classification/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow Lite Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) | TensorFlow | TensorFlowLite | 6 | | Classification | General (compact) | TensorFlow | TensorFlowLiteFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the TensorFlow Lite environment. 13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 21 | -------------------------------------------------------------------------------- /samples/python/tensorflow_lite/object_detection/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow Lite Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) | TensorFlow | TensorFlowLite | 6 | | Object Detection | General (compact) | TensorFlow | TensorFlowLiteFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the TensorFlow Lite environment. 13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 21 | -------------------------------------------------------------------------------- /samples/python/tensorflow_lite/classification_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow Lite Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowLite | 6 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowLiteFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the TensorFlow Lite environment. 13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API.
21 | -------------------------------------------------------------------------------- /samples/python/tensorflow_lite/object_detection_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's TensorFlow Lite Object Detection model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowLite | 6 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowLiteFloat16 | 7 | 8 | 9 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 10 | 11 | ## Set up 12 | - Follow the [README](../README.md) to set up the TensorFlow Lite environment. 13 | 14 | ## How to use 15 | ``` 16 | python predict.py <model_filepath> <image_filepath> 17 | ``` 18 | 19 | ## Notes 20 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 21 | -------------------------------------------------------------------------------- /samples/csharp/onnx/classification/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's ONNX classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) | ONNX | null | 6 | | Classification | General (compact) | ONNX | OnnxFloat16 | 7 | | Classification | General (compact) [S1] | ONNX | null | 8 | | Classification | General (compact) [S1] | ONNX | OnnxFloat16 | 9 | 10 | 11 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 12 | 13 | ## Set up 14 | - Follow the [README](../README.md) to set up the C# ONNX environment. 15 | 16 | ## How to use 17 | ``` 18 | dotnet run <model_filepath> <image_filepath> 19 | ``` 20 | 21 | ## Notes 22 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 4 | > Please provide us with the following information: 5 | > --------------------------------------------------------------- 6 | 7 | ### This issue is for a: (mark with an `x`) 8 | ``` 9 | - [ ] bug report -> please search issues before submitting 10 | - [ ] feature request 11 | - [ ] documentation issue or request 12 | - [ ] regression (a behavior that used to work and stopped in a new release) 13 | ``` 14 | 15 | ### Minimal steps to reproduce 16 | > 17 | 18 | ### Any log messages given by the failure 19 | > 20 | 21 | ### Expected/desired behavior 22 | > 23 | 24 | ### OS and Version? 25 | > Windows 7, 8 or 10. Linux (which distribution). macOS (Yosemite? El Capitan? Sierra?) 26 | 27 | ### Mention any other details that might be useful 28 | 29 | > --------------------------------------------------------------- 30 | > Thanks! We'll be in touch soon.
31 | -------------------------------------------------------------------------------- /samples/python/openvino/classification/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's OpenVino Classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) [S1] | OpenVino | null | 6 | | Classification | General (compact) [S1] | OpenVino | NoPostProcess | 7 | | Classification | General (compact) | OpenVino | null | 8 | | Classification | General (compact) | OpenVino | NoPostProcess | 9 | 10 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 11 | 12 | ## Setup 13 | - Follow the [README](../README.md) to set up the OpenVino environment. 14 | 15 | ## How to use 16 | ``` 17 | python predict.py <model_filepath> <image_filepath> 18 | ``` 19 | 20 | ## Notes 21 | Because of a slight difference in image preprocessing logic, this script does not produce results identical to Custom Vision's Cloud API. 22 | -------------------------------------------------------------------------------- /samples/csharp/onnx/README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for Custom Vision's ONNX models 2 | 3 | | Task | Domain | Export Platform | Export Flavor | Link | 4 | |------|--------|-----------------|---------------|------| 5 | | Classification | General (compact) [S1] | ONNX | null | [README](classification) | 6 | | Classification | General (compact) [S1] | ONNX | OnnxFloat16 | [README](classification) | 7 | | Classification | General (compact) | ONNX | null | [README](classification) | 8 | | Classification | General (compact) | ONNX | OnnxFloat16 | [README](classification) | 9 | | Object Detection | General (compact) [S1] | ONNX | null | [README](object_detection_s1) | 10 | | Object Detection | General (compact) [S1] | ONNX | OnnxFloat16 | [README](object_detection_s1) | 11 | | Object Detection | General (compact) | ONNX | null | [README](object_detection) | 12 | | Object Detection | General (compact) | ONNX | OnnxFloat16 | [README](object_detection) | 13 | 14 | ## Setup 15 | See the [.NET documentation](https://dotnet.microsoft.com/en-us/download) to set up a .NET 6.0 environment.
16 | -------------------------------------------------------------------------------- /samples/python/onnx/README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for Custom Vision's ONNX models 2 | 3 | | Task | Domain | Export Platform | Export Flavor | Link | 4 | |------|--------|-----------------|---------------|------| 5 | | Classification | General (compact) [S1] | ONNX | null | [README](classification_s1) | 6 | | Classification | General (compact) [S1] | ONNX | OnnxFloat16 | [README](classification_s1) | 7 | | Classification | General (compact) | ONNX | null | [README](classification) | 8 | | Classification | General (compact) | ONNX | OnnxFloat16 | [README](classification) | 9 | | Object Detection | General (compact) [S1] | ONNX | null | [README](object_detection_s1) | 10 | | Object Detection | General (compact) [S1] | ONNX | OnnxFloat16 | [README](object_detection_s1) | 11 | | Object Detection | General (compact) | ONNX | null | [README](object_detection) | 12 | | Object Detection | General (compact) | ONNX | OnnxFloat16 | [README](object_detection) | 13 | 14 | ## Prerequisites 15 | - Python 3.7+ 16 | 17 | ## Setup 18 | ```bash 19 | pip install onnx onnxruntime Pillow 20 | ``` 21 | -------------------------------------------------------------------------------- /samples/python/coreml/README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for Custom Vision's CoreML models 2 | 3 | | Task | Domain | Export Platform | Export Flavor | Link | 4 | |------|--------|-----------------|---------------|------| 5 | | Classification | General (compact) [S1] | CoreML | null | [README](classification) | 6 | | Classification | General (compact) [S1] | CoreML | CoreMLFloat16 | [README](classification) | 7 | | Classification | General (compact) | CoreML | null | [README](classification) | 8 | | Classification | General (compact) | CoreML | CoreMLFloat16 | [README](classification) | 9 | | Object Detection | General (compact) [S1] | CoreML | null | [README](object_detection_s1) | 10 | | Object Detection | General (compact) [S1] | CoreML | CoreMLFloat16 | [README](object_detection_s1) | 11 | | Object Detection | General (compact) | CoreML | null | [README](object_detection) | 12 | | Object Detection | General (compact) | CoreML | CoreMLFloat16 | [README](object_detection) | 13 | 14 | ## Prerequisites 15 | - Python 3.7+ 16 | - MacOS 17 | 18 | ## Setup 19 | ```bash 20 | pip install coremltools Pillow 21 | ``` 22 | -------------------------------------------------------------------------------- /samples/python/openvino/README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for Custom Vision OpenVino models 2 | 3 | | Task | Domain | Export Platform | Export Flavor | Link | 4 | |------|--------|-----------------|---------------|------| 5 | | Classification | General (compact) | OpenVino | null | [README](classification) | 6 | | Classification | General (compact) [S1] | OpenVino | null | [README](classification) | 7 | | Classification | General (compact) | OpenVino | NoPostProcess | [README](classification) | 8 | | Classification | General (compact) [S1] | OpenVino | NoPostProcess | [README](classification) | 9 | | Object Detection | General (compact) | OpenVino | null | [README](object_detection) | 10 | | Object Detection | General (compact) [S1] | OpenVino | null | [README](object_detection) | 11 | | Object Detection | General (compact) 
| OpenVino | NoPostProcess | [README](object_detection_no_postprocess) | 12 | | Object Detection | General (compact) [S1] | OpenVino | NoPostProcess | [README](object_detection_no_postprocess_s1) | 13 | 14 | ## Prerequisites 15 | Python 3.7+ 16 | 17 | ## Setup 18 | ```bash 19 | pip install openvino~=2021.3.0 Pillow 20 | ``` 21 | 22 | -------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for Custom Vision's TensorFlow.js models 2 | 3 | | Task | Domain | Export Platform | Export Flavor | Platform | Link | 4 | | ---- | ------ | --------------- | ------------- | -------- | ---- | 5 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowJs | Browser, CDN | [README](classification) | 6 | | Classification | General (compact) | TensorFlow | TensorFlowJs | Browser, CDN | [README](classification) | 7 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowJs | Browser, CDN | [README](objectdetection) | 8 | | Object Detection | General (compact) | TensorFlow | TensorFlowJs | Browser, CDN | [README](objectdetection) | 9 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowJs | Node.js | [README](classification_nodejs) | 10 | | Classification | General (compact) | TensorFlow | TensorFlowJs | Node.js | [README](classification_nodejs) | 11 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowJs | Node.js | [README](objectdetection_nodejs) | 12 | | Object Detection | General (compact) | TensorFlow | TensorFlowJs | Node.js | [README](objectdetection_nodejs) | 13 | -------------------------------------------------------------------------------- /samples/csharp/mlnet/object_detection_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's ONNX Object Detection model (ML.NET) 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Object Detection | General (compact) [S1] | ONNX | null | 6 | 7 | ## Description 8 | 9 | This sample is a C# console application that uses a Custom Vision ONNX object detection model in an ML.NET pipeline. 10 | 11 | For more information on ML.NET, see [What is ML.NET?](https://learn.microsoft.com/dotnet/machine-learning/how-does-mldotnet-work).
12 | 13 | ## Set up 14 | 15 | See the [ML.NET samples README](../README.md) for prerequisites. 16 | 17 | ## How to use 18 | 19 | ```bash 20 | dotnet run --image_path <image_path> --model_path <model_path> --labels_path <labels_path> --confidence <confidence> 21 | ``` 22 | 23 | - *image_path*: The file path to the image you want to run inference on 24 | - *model_path*: The file path of the exported ONNX model file 25 | - *labels_path*: The file path of the exported labels file 26 | - (Optional) *confidence*: A float value between 0.0 and 1.0 that sets the confidence score used to filter detected bounding boxes 27 | -------------------------------------------------------------------------------- /samples/python/tensorflow_lite/README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for Custom Vision's TensorFlow Lite models 2 | 3 | | Task | Domain | Export Platform | Export Flavor | Link | 4 | |------|--------|-----------------|---------------|------| 5 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowLite | [README](classification_s1) | 6 | | Classification | General (compact) [S1] | TensorFlow | TensorFlowLiteFloat16 | [README](classification_s1) | 7 | | Classification | General (compact) | TensorFlow | TensorFlowLite | [README](classification) | 8 | | Classification | General (compact) | TensorFlow | TensorFlowLiteFloat16 | [README](classification) | 9 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowLite | [README](object_detection_s1) | 10 | | Object Detection | General (compact) [S1] | TensorFlow | TensorFlowLiteFloat16 | [README](object_detection_s1) | 11 | | Object Detection | General (compact) | TensorFlow | TensorFlowLite | [README](object_detection) | 12 | | Object Detection | General (compact) | TensorFlow | TensorFlowLiteFloat16 | [README](object_detection) | 13 | 14 | ## Prerequisites 15 | - Python 3.7+ 16 | 17 | ## Setup 18 | ```bash 19 | pip install tensorflow Pillow 20 | ``` 21 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /samples/csharp/mlnet/classification_s1/README.md: -------------------------------------------------------------------------------- 1 | # Sample script for Custom Vision's ONNX classification model 2 | 3 | | Task | Domain | Export Platform | Export Flavor | 4 | |------|--------|-----------------|---------------| 5 | | Classification | General (compact) [S1] | ONNX | null | 6 | 7 | For details about the model export features, please visit [Custom Vision's official documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/). 8 | 9 | ## Description 10 | 11 | This sample is a C# console application that uses a Custom Vision ONNX classification model in an ML.NET pipeline. 12 | 13 | For more information on ML.NET, see [What is ML.NET?](https://learn.microsoft.com/dotnet/machine-learning/how-does-mldotnet-work). 14 | 15 | ## Set up 16 | 17 | See the [ML.NET samples README](../README.md) for prerequisites. 18 | 19 | ## How to use 20 | 21 | ```bash 22 | dotnet run --image_path <image_path> --model_path <model_path> --labels_path <labels_path> 23 | ``` 24 | 25 | - *image_path*: The file path to the image you want to run inference on 26 | - *model_path*: The file path of the exported ONNX model file 27 | - *labels_path*: The file path of the exported labels file 28 | -------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/classification/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Custom Vision classification sample 6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /samples/python/coreml/classification/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import coremltools 4 | import PIL.Image 5 | 6 | 7 | class Model: 8 | def __init__(self, model_filepath): 9 | self.model = coremltools.models.MLModel(str(model_filepath)) 10 | spec = self.model.get_spec() 11 | assert len(spec.description.input) == 1 12 | input_description = spec.description.input[0] 13 | self.input_name = input_description.name 14 | self.input_shape = (input_description.type.imageType.width, input_description.type.imageType.height) 15 | 16 | def predict(self, image_filepath): 17 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 18 | return self.model.predict({self.input_name: image}) 19 | 20 | 21 | def print_outputs(outputs): 22 | print(outputs) 23 | 24 | 25 | def main(): 26 | parser = argparse.ArgumentParser() 27 | parser.add_argument('model_filepath', type=pathlib.Path) 28 | parser.add_argument('image_filepath', type=pathlib.Path) 29 | 30 | args = parser.parse_args() 31 | 32 | model = Model(args.model_filepath) 33 | outputs = model.predict(args.image_filepath) 34 | print_outputs(outputs) 35 | 36 | 37 | if __name__ == '__main__': 38 | main() 39 | -------------------------------------------------------------------------------- /samples/python/tensorflow_saved_model/classification_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import tensorflow 5 | import PIL.Image 6 | 7 | 8 | class Model: 9 | def __init__(self, model_dirpath): 10 | model = tensorflow.saved_model.load(str(model_dirpath)) 11 | self.serve = model.signatures['serving_default'] 12 | self.input_shape = self.serve.inputs[0].shape[1:3] 13 | 14 | def predict(self, image_filepath): 15 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 16 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 17 | 18 | input_tensor = tensorflow.convert_to_tensor(input_array) 19 | return self.serve(input_tensor) 20 | 21 | 22 | def print_outputs(outputs): 23 | outputs = list(outputs.values())[0] 24 | for index, score in enumerate(outputs[0]): 25 | print(f"Label: {index}, score: {score:.5f}") 26 | 27 | 28 | def main(): 29 | parser = argparse.ArgumentParser() 30 | parser.add_argument('model_dirpath', type=pathlib.Path) 31 | parser.add_argument('image_filepath', type=pathlib.Path) 32 | 33 | args = parser.parse_args() 34 | 35 | if args.model_dirpath.is_file(): 36 | args.model_dirpath = args.model_dirpath.parent 37 | 38 | model = Model(args.model_dirpath) 39 | outputs = model.predict(args.image_filepath) 40 | print_outputs(outputs) 41 | 42 | 43 | if __name__ == '__main__': 44 | main() 45 | -------------------------------------------------------------------------------- /samples/python/tensorflow_saved_model/classification/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import tensorflow 5 | import PIL.Image 6 | 7 | 8 | class Model: 9 | def __init__(self, model_dirpath): 10 | model = tensorflow.saved_model.load(str(model_dirpath)) 11 | self.serve = model.signatures['serving_default'] 12 | self.input_shape = self.serve.inputs[0].shape[1:3] 13 | 14 | def predict(self, image_filepath): 15 | image = 
PIL.Image.open(image_filepath).resize(self.input_shape) 16 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 17 | input_array = input_array[:, :, :, (2, 1, 0)] # => BGR 18 | 19 | input_tensor = tensorflow.convert_to_tensor(input_array) 20 | return self.serve(input_tensor) 21 | 22 | 23 | def print_outputs(outputs): 24 | outputs = list(outputs.values())[0] 25 | for index, score in enumerate(outputs[0]): 26 | print(f"Label: {index}, score: {score:.5f}") 27 | 28 | 29 | def main(): 30 | parser = argparse.ArgumentParser() 31 | parser.add_argument('model_dirpath', type=pathlib.Path) 32 | parser.add_argument('image_filepath', type=pathlib.Path) 33 | 34 | args = parser.parse_args() 35 | 36 | if args.model_dirpath.is_file(): 37 | args.model_dirpath = args.model_dirpath.parent 38 | 39 | model = Model(args.model_dirpath) 40 | outputs = model.predict(args.image_filepath) 41 | print_outputs(outputs) 42 | 43 | 44 | if __name__ == '__main__': 45 | main() 46 | -------------------------------------------------------------------------------- /samples/javascript/tensorflowjs/objectdetection/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Custom Vision object detection sample 6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /samples/python/tensorflow_lite/classification_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import tensorflow 5 | import PIL.Image 6 | 7 | 8 | class Model: 9 | def __init__(self, model_filepath): 10 | self.interpreter = tensorflow.lite.Interpreter(model_path=str(model_filepath)) 11 | self.interpreter.allocate_tensors() 12 | 13 | self.input_details = self.interpreter.get_input_details() 14 | self.output_details = self.interpreter.get_output_details() 15 | assert len(self.input_details) == 1 and len(self.output_details) == 1 16 | self.input_shape = self.input_details[0]['shape'][1:3] 17 | 18 | def predict(self, image_filepath): 19 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 20 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 21 | 22 | self.interpreter.set_tensor(self.input_details[0]['index'], input_array) 23 | self.interpreter.invoke() 24 | 25 | return {detail['name']: self.interpreter.get_tensor(detail['index']) for detail in self.output_details} 26 | 27 | 28 | def print_outputs(outputs): 29 | outputs = list(outputs.values())[0] 30 | for index, score in enumerate(outputs[0]): 31 | print(f"Label: {index}, score: {score:.5f}") 32 | 33 | 34 | def main(): 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument('model_filepath', type=pathlib.Path) 37 | parser.add_argument('image_filepath', type=pathlib.Path) 38 | 39 | args = parser.parse_args() 40 | 41 | model = Model(args.model_filepath) 42 | outputs = model.predict(args.image_filepath) 43 | print_outputs(outputs) 44 | 45 | 46 | if __name__ == '__main__': 47 | main() 48 | -------------------------------------------------------------------------------- /samples/python/coreml/object_detection_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import coremltools 4 | import numpy as np 5 | import PIL.Image 6 | 7 | PROB_THRESHOLD = 0.01 # Minimum probability to show results.
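# The exported model returns final detections (the 'coordinates' and 'confidence' outputs; see print_outputs below), so this script only needs to filter them by PROB_THRESHOLD.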
8 | 9 | 10 | class Model: 11 | def __init__(self, model_filepath): 12 | self.model = coremltools.models.MLModel(str(model_filepath)) 13 | spec = self.model.get_spec() 14 | input_description = spec.description.input[0] 15 | self.input_name = input_description.name 16 | self.input_shape = (input_description.type.imageType.width, input_description.type.imageType.height) 17 | 18 | def predict(self, image_filepath): 19 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 20 | return self.model.predict({self.input_name: image}) 21 | 22 | 23 | def print_outputs(outputs): 24 | assert set(outputs.keys()) == set(['coordinates', 'confidence']) 25 | for box, scores in zip(outputs['coordinates'], outputs['confidence']): 26 | class_id = np.argmax(scores) 27 | score = np.max(scores) 28 | box = [box[0], box[1], box[0] + box[2], box[1] + box[3]] 29 | if score > PROB_THRESHOLD: 30 | print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})") 31 | 32 | 33 | def main(): 34 | parser = argparse.ArgumentParser() 35 | parser.add_argument('model_filepath', type=pathlib.Path) 36 | parser.add_argument('image_filepath', type=pathlib.Path) 37 | 38 | args = parser.parse_args() 39 | 40 | model = Model(args.model_filepath) 41 | outputs = model.predict(args.image_filepath) 42 | print_outputs(outputs) 43 | 44 | 45 | if __name__ == '__main__': 46 | main() 47 | -------------------------------------------------------------------------------- /samples/python/tensorflow_lite/classification/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import tensorflow 5 | import PIL.Image 6 | 7 | 8 | class Model: 9 | def __init__(self, model_filepath): 10 | self.interpreter = tensorflow.lite.Interpreter(model_path=str(model_filepath)) 11 | self.interpreter.allocate_tensors() 12 | 13 | self.input_details = self.interpreter.get_input_details() 14 | self.output_details = self.interpreter.get_output_details() 15 | assert len(self.input_details) == 1 and len(self.output_details) == 1 16 | self.input_shape = self.input_details[0]['shape'][1:3] 17 | 18 | def predict(self, image_filepath): 19 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 20 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 21 | input_array = input_array[:, :, :, (2, 1, 0)] # => BGR 22 | 23 | self.interpreter.set_tensor(self.input_details[0]['index'], input_array) 24 | self.interpreter.invoke() 25 | 26 | return {detail['name']: self.interpreter.get_tensor(detail['index']) for detail in self.output_details} 27 | 28 | 29 | def print_outputs(outputs): 30 | outputs = list(outputs.values())[0] 31 | for index, score in enumerate(outputs[0]): 32 | print(f"Label: {index}, score: {score:.5f}") 33 | 34 | 35 | def main(): 36 | parser = argparse.ArgumentParser() 37 | parser.add_argument('model_filepath', type=pathlib.Path) 38 | parser.add_argument('image_filepath', type=pathlib.Path) 39 | 40 | args = parser.parse_args() 41 | 42 | model = Model(args.model_filepath) 43 | outputs = model.predict(args.image_filepath) 44 | print_outputs(outputs) 45 | 46 | 47 | if __name__ == '__main__': 48 | main() 49 | -------------------------------------------------------------------------------- /samples/python/openvino/classification/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | 
import PIL.Image 5 | from openvino.inference_engine import IECore 6 | 7 | 8 | class Model: 9 | def __init__(self, xml_filepath, bin_filepath): 10 | ie = IECore() 11 | net = ie.read_network(str(xml_filepath), str(bin_filepath)) 12 | assert len(net.input_info) == 1 13 | 14 | self.exec_net = ie.load_network(network=net, device_name='CPU') 15 | self.input_name = list(net.input_info.keys())[0] 16 | self.input_shape = net.input_info[self.input_name].input_data.shape[2:] 17 | self.output_names = list(net.outputs.keys()) 18 | 19 | def predict(self, image_filepath): 20 | # The model requires RGB[0-1] NCHW input. 21 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 22 | input_array = np.array(image)[np.newaxis, :, :, :] 23 | input_array = input_array.transpose((0, 3, 1, 2)) # => (N, C, H, W) 24 | input_array = input_array / 255 # => Pixel values should be in range [0, 1] 25 | 26 | return self.exec_net.infer(inputs={self.input_name: input_array}) 27 | 28 | 29 | def print_outputs(outputs): 30 | outputs = list(outputs.values())[0] 31 | for index, score in enumerate(outputs[0]): 32 | print(f"Label: {index}, score: {score:.5f}") 33 | 34 | 35 | def main(): 36 | parser = argparse.ArgumentParser() 37 | parser.add_argument('xml_filepath', type=pathlib.Path) 38 | parser.add_argument('bin_filepath', type=pathlib.Path) 39 | parser.add_argument('image_filepath', type=pathlib.Path) 40 | 41 | args = parser.parse_args() 42 | 43 | model = Model(args.xml_filepath, args.bin_filepath) 44 | outputs = model.predict(args.image_filepath) 45 | print_outputs(outputs) 46 | 47 | 48 | if __name__ == '__main__': 49 | main() 50 | -------------------------------------------------------------------------------- /samples/python/tensorflow_saved_model/object_detection_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import PIL.Image 5 | import tensorflow 6 | 7 | PROB_THRESHOLD = 0.01 # Minimum probability to show results. 8 | 9 | 10 | class Model: 11 | def __init__(self, model_dirpath): 12 | model = tensorflow.saved_model.load(str(model_dirpath)) 13 | self.serve = model.signatures['serving_default'] 14 | self.input_shape = self.serve.inputs[0].shape[1:3] 15 | 16 | def predict(self, image_filepath): 17 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 18 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 19 | 20 | input_tensor = tensorflow.convert_to_tensor(input_array) 21 | outputs = self.serve(input_tensor) 22 | return {k: v[np.newaxis, ...]
for k, v in outputs.items()} 23 | 24 | 25 | def print_outputs(outputs): 26 | assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores']) 27 | for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]): 28 | if score > PROB_THRESHOLD: 29 | print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})") 30 | 31 | 32 | def main(): 33 | parser = argparse.ArgumentParser() 34 | parser.add_argument('model_dirpath', type=pathlib.Path) 35 | parser.add_argument('image_filepath', type=pathlib.Path) 36 | 37 | args = parser.parse_args() 38 | 39 | if args.model_dirpath.is_file(): 40 | args.model_dirpath = args.model_dirpath.parent 41 | 42 | model = Model(args.model_dirpath) 43 | outputs = model.predict(args.image_filepath) 44 | print_outputs(outputs) 45 | 46 | 47 | if __name__ == '__main__': 48 | main() 49 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Sample scripts for exported models from Custom Vision Service. 2 | 3 | This repository contains sample scripts for using exported models from [Custom Vision Service](https://customvision.ai). 4 | 5 | 6 | | Language | Model type | Link | 7 | | -------- | -------- | ---- | 8 | | C# | ONNX | [README](samples/csharp/onnx) | 9 | | C# | ONNX & ML.NET | [README](samples/csharp/mlnet) | 10 | | JavaScript | TensorFlow.js | [README](samples/javascript/tensorflowjs) | 11 | | Python | CoreML | [README](samples/python/coreml) | 12 | | Python | ONNX | [README](samples/python/onnx) | 13 | | Python | OpenVINO | [README](samples/python/openvino) | 14 | | Python | TensorFlow (Frozen Graph) [^1] | [README](samples/python/tensorflow) | 15 | | Python | TensorFlow (Saved Model) | [README](samples/python/tensorflow_saved_model) | 16 | | Python | TensorFlow Lite | [README](samples/python/tensorflow_lite) | 17 | 18 | [^1]: This is the default export flavor for the TensorFlow platform. 19 | 20 | ## How to export a model from Custom Vision Service? 21 | Please see this [document](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/export-your-model). 22 | 23 | 24 | ## Notes 25 | These sample scripts do not aim to produce results identical to Custom Vision's prediction APIs. There are slight differences in the pre-processing logic, which cause small differences in the inference results. 26 | 27 | 28 | ## Related sample projects 29 | | Language | Platform | Repository | 30 | | -------- | -------- | ---------- | 31 | | Java | Android | https://github.com/Azure-Samples/cognitive-services-android-customvision-sample | 32 | | Swift, Objective-C | iOS | https://github.com/Azure-Samples/cognitive-services-ios-customvision-sample | 33 | 34 | ## Resources 35 | * [Custom Vision Service documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/) 36 | 37 | -------------------------------------------------------------------------------- /samples/python/tensorflow_lite/object_detection_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import PIL.Image 5 | import tensorflow 6 | 7 | PROB_THRESHOLD = 0.01 # Minimum probability to show results.
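# The S1 TensorFlow Lite export already includes post-processing: the interpreter returns final 'detected_boxes', 'detected_classes' and 'detected_scores' tensors, so this script only filters by PROB_THRESHOLD.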
8 | 9 | 10 | class Model: 11 | def __init__(self, model_filepath): 12 | self.interpreter = tensorflow.lite.Interpreter(model_path=str(model_filepath)) 13 | self.interpreter.allocate_tensors() 14 | 15 | self.input_details = self.interpreter.get_input_details() 16 | self.output_details = self.interpreter.get_output_details() 17 | assert len(self.input_details) == 1 and len(self.output_details) == 3 18 | self.input_shape = self.input_details[0]['shape'][1:3] 19 | 20 | def predict(self, image_filepath): 21 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 22 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 23 | 24 | self.interpreter.set_tensor(self.input_details[0]['index'], input_array) 25 | self.interpreter.invoke() 26 | 27 | return {detail['name']: self.interpreter.get_tensor(detail['index'])[np.newaxis, ...] for detail in self.output_details} 28 | 29 | 30 | def print_outputs(outputs): 31 | assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores']) 32 | for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]): 33 | if score > PROB_THRESHOLD: 34 | print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})") 35 | 36 | 37 | def main(): 38 | parser = argparse.ArgumentParser() 39 | parser.add_argument('model_filepath', type=pathlib.Path) 40 | parser.add_argument('image_filepath', type=pathlib.Path) 41 | 42 | args = parser.parse_args() 43 | 44 | model = Model(args.model_filepath) 45 | outputs = model.predict(args.image_filepath) 46 | print_outputs(outputs) 47 | 48 | 49 | if __name__ == '__main__': 50 | main() 51 | -------------------------------------------------------------------------------- /samples/python/openvino/object_detection/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import PIL.Image 5 | from openvino.inference_engine import IECore 6 | 7 | PROB_THRESHOLD = 0.01 # Minimum probability to show results. 8 | 9 | 10 | class Model: 11 | def __init__(self, xml_filepath, bin_filepath): 12 | ie = IECore() 13 | net = ie.read_network(str(xml_filepath), str(bin_filepath)) 14 | assert len(net.input_info) == 1 15 | 16 | self.exec_net = ie.load_network(network=net, device_name='CPU') 17 | self.input_name = list(net.input_info.keys())[0] 18 | self.input_shape = net.input_info[self.input_name].input_data.shape[2:] 19 | self.output_names = list(net.outputs.keys()) 20 | 21 | def predict(self, image_filepath): 22 | # The model requires RGB[0-1] NCHW input.
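# PIL returns HWC RGB uint8 data; the lines below add a batch axis, transpose to (N, C, H, W), and scale pixel values to [0, 1].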
23 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 24 | input_array = np.array(image)[np.newaxis, :, :, :] 25 | input_array = input_array.transpose((0, 3, 1, 2)) # => (N, C, H, W) 26 | input_array = input_array / 255 # => Pixel values should be in range [0, 1] 27 | 28 | return self.exec_net.infer(inputs={self.input_name: input_array}) 29 | 30 | 31 | def print_outputs(outputs): 32 | assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores']) 33 | for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]): 34 | if score > PROB_THRESHOLD: 35 | print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})") 36 | 37 | 38 | def main(): 39 | parser = argparse.ArgumentParser() 40 | parser.add_argument('xml_filepath', type=pathlib.Path) 41 | parser.add_argument('bin_filepath', type=pathlib.Path) 42 | parser.add_argument('image_filepath', type=pathlib.Path) 43 | 44 | args = parser.parse_args() 45 | 46 | model = Model(args.xml_filepath, args.bin_filepath) 47 | outputs = model.predict(args.image_filepath) 48 | print_outputs(outputs) 49 | 50 | 51 | if __name__ == '__main__': 52 | main() 53 | -------------------------------------------------------------------------------- /samples/python/onnx/classification/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import onnx 5 | import onnxruntime 6 | import PIL.Image 7 | 8 | 9 | class Model: 10 | def __init__(self, model_filepath): 11 | self.session = onnxruntime.InferenceSession(str(model_filepath)) 12 | assert len(self.session.get_inputs()) == 1 13 | self.input_shape = self.session.get_inputs()[0].shape[2:] 14 | self.input_name = self.session.get_inputs()[0].name 15 | self.input_type = {'tensor(float)': np.float32, 'tensor(float16)': np.float16}[self.session.get_inputs()[0].type] 16 | self.output_names = [o.name for o in self.session.get_outputs()] 17 | 18 | self.is_bgr = False 19 | self.is_range255 = False 20 | onnx_model = onnx.load(model_filepath) 21 | for metadata in onnx_model.metadata_props: 22 | if metadata.key == 'Image.BitmapPixelFormat' and metadata.value == 'Bgr8': 23 | self.is_bgr = True 24 | elif metadata.key == 'Image.NominalPixelRange' and metadata.value == 'NominalRange_0_255': 25 | self.is_range255 = True 26 | 27 | def predict(self, image_filepath): 28 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 29 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 30 | input_array = input_array.transpose((0, 3, 1, 2)) # => (N, C, H, W) 31 | if self.is_bgr: 32 | input_array = input_array[:, (2, 1, 0), :, :] 33 | if not self.is_range255: 34 | input_array = input_array / 255 # => Pixel values should be in range [0, 1] 35 | 36 | outputs = self.session.run(self.output_names, {self.input_name: input_array.astype(self.input_type)}) 37 | return {name: outputs[i] for i, name in enumerate(self.output_names)} 38 | 39 | 40 | def print_outputs(outputs): 41 | print(outputs) 42 | 43 | 44 | def main(): 45 | parser = argparse.ArgumentParser() 46 | parser.add_argument('model_filepath', type=pathlib.Path) 47 | parser.add_argument('image_filepath', type=pathlib.Path) 48 | 49 | args = parser.parse_args() 50 | 51 | model = Model(args.model_filepath) 52 | outputs = model.predict(args.image_filepath) 53 | print_outputs(outputs) 54 | 55 | 56 | if __name__ == 
'__main__': 57 | main() 58 | -------------------------------------------------------------------------------- /samples/python/onnx/classification_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import onnx 5 | import onnxruntime 6 | import PIL.Image 7 | 8 | 9 | class Model: 10 | def __init__(self, model_filepath): 11 | self.session = onnxruntime.InferenceSession(str(model_filepath)) 12 | assert len(self.session.get_inputs()) == 1 13 | self.input_shape = self.session.get_inputs()[0].shape[2:] 14 | self.input_name = self.session.get_inputs()[0].name 15 | self.input_type = {'tensor(float)': np.float32, 'tensor(float16)': np.float16}[self.session.get_inputs()[0].type] 16 | self.output_names = [o.name for o in self.session.get_outputs()] 17 | 18 | self.is_bgr = False 19 | self.is_range255 = False 20 | onnx_model = onnx.load(model_filepath) 21 | for metadata in onnx_model.metadata_props: 22 | if metadata.key == 'Image.BitmapPixelFormat' and metadata.value == 'Bgr8': 23 | self.is_bgr = True 24 | elif metadata.key == 'Image.NominalPixelRange' and metadata.value == 'NominalRange_0_255': 25 | self.is_range255 = True 26 | 27 | def predict(self, image_filepath): 28 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 29 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 30 | input_array = input_array.transpose((0, 3, 1, 2)) # => (N, C, H, W) 31 | if self.is_bgr: 32 | input_array = input_array[:, (2, 1, 0), :, :] 33 | if not self.is_range255: 34 | input_array = input_array / 255 # => Pixel values should be in range [0, 1] 35 | 36 | outputs = self.session.run(self.output_names, {self.input_name: input_array.astype(self.input_type)}) 37 | return {name: outputs[i] for i, name in enumerate(self.output_names)} 38 | 39 | 40 | def print_outputs(outputs): 41 | outputs = list(outputs.values())[0] 42 | for index, score in enumerate(outputs[0]): 43 | print(f"Label: {index}, score: {score:.5f}") 44 | 45 | 46 | def main(): 47 | parser = argparse.ArgumentParser() 48 | parser.add_argument('model_filepath', type=pathlib.Path) 49 | parser.add_argument('image_filepath', type=pathlib.Path) 50 | 51 | args = parser.parse_args() 52 | 53 | model = Model(args.model_filepath) 54 | outputs = model.predict(args.image_filepath) 55 | print_outputs(outputs) 56 | 57 | 58 | if __name__ == '__main__': 59 | main() 60 | -------------------------------------------------------------------------------- /samples/python/tensorflow/classification_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import tensorflow 5 | import PIL.Image 6 | 7 | 8 | class Model: 9 | def __init__(self, model_filepath): 10 | self.graph_def = tensorflow.compat.v1.GraphDef() 11 | self.graph_def.ParseFromString(model_filepath.read_bytes()) 12 | 13 | input_names, self.output_names = self._get_graph_inout(self.graph_def) 14 | assert len(input_names) == 1 15 | self.input_name = input_names[0] 16 | self.input_shape = self._get_input_shape(self.graph_def, self.input_name) 17 | 18 | def predict(self, image_filepath): 19 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 20 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 21 | 22 | with tensorflow.compat.v1.Session() as sess: 23 | tensorflow.import_graph_def(self.graph_def, name='') 24 | out_tensors = [sess.graph.get_tensor_by_name(o 
+ ':0') for o in self.output_names] 25 | outputs = sess.run(out_tensors, {self.input_name + ':0': input_array}) 26 | 27 | return {name: outputs[i] for i, name in enumerate(self.output_names)} 28 | 29 | @staticmethod 30 | def _get_graph_inout(graph_def): 31 | input_names = [] 32 | inputs_set = set() 33 | outputs_set = set() 34 | 35 | for node in graph_def.node: 36 | if node.op == 'Placeholder': 37 | input_names.append(node.name) 38 | 39 | for i in node.input: 40 | inputs_set.add(i.split(':')[0]) 41 | outputs_set.add(node.name) 42 | 43 | output_names = list(outputs_set - inputs_set) 44 | return input_names, output_names 45 | 46 | @staticmethod 47 | def _get_input_shape(graph_def, input_name): 48 | for node in graph_def.node: 49 | if node.name == input_name: 50 | return [dim.size for dim in node.attr['shape'].shape.dim][1:3] 51 | 52 | 53 | def print_outputs(outputs): 54 | outputs = list(outputs.values())[0] 55 | for index, score in enumerate(outputs[0]): 56 | print(f"Label: {index}, score: {score:.5f}") 57 | 58 | 59 | def main(): 60 | parser = argparse.ArgumentParser() 61 | parser.add_argument('model_filepath', type=pathlib.Path) 62 | parser.add_argument('image_filepath', type=pathlib.Path) 63 | 64 | args = parser.parse_args() 65 | 66 | model = Model(args.model_filepath) 67 | outputs = model.predict(args.image_filepath) 68 | print_outputs(outputs) 69 | 70 | 71 | if __name__ == '__main__': 72 | main() 73 | -------------------------------------------------------------------------------- /samples/python/tensorflow/classification/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import tensorflow 5 | import PIL.Image 6 | 7 | 8 | class Model: 9 | def __init__(self, model_filepath): 10 | self.graph_def = tensorflow.compat.v1.GraphDef() 11 | self.graph_def.ParseFromString(model_filepath.read_bytes()) 12 | 13 | input_names, self.output_names = self._get_graph_inout(self.graph_def) 14 | assert len(input_names) == 1 15 | self.input_name = input_names[0] 16 | self.input_shape = self._get_input_shape(self.graph_def, self.input_name) 17 | 18 | def predict(self, image_filepath): 19 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 20 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 21 | input_array = input_array[:, :, :, (2, 1, 0)] # => BGR 22 | 23 | with tensorflow.compat.v1.Session() as sess: 24 | tensorflow.import_graph_def(self.graph_def, name='') 25 | out_tensors = [sess.graph.get_tensor_by_name(o + ':0') for o in self.output_names] 26 | outputs = sess.run(out_tensors, {self.input_name + ':0': input_array}) 27 | 28 | return {name: outputs[i] for i, name in enumerate(self.output_names)} 29 | 30 | @staticmethod 31 | def _get_graph_inout(graph_def): 32 | input_names = [] 33 | inputs_set = set() 34 | outputs_set = set() 35 | 36 | for node in graph_def.node: 37 | if node.op == 'Placeholder': 38 | input_names.append(node.name) 39 | 40 | for i in node.input: 41 | inputs_set.add(i.split(':')[0]) 42 | outputs_set.add(node.name) 43 | 44 | output_names = list(outputs_set - inputs_set) 45 | return input_names, output_names 46 | 47 | @staticmethod 48 | def _get_input_shape(graph_def, input_name): 49 | for node in graph_def.node: 50 | if node.name == input_name: 51 | return [dim.size for dim in node.attr['shape'].shape.dim][1:3] 52 | 53 | 54 | def print_outputs(outputs): 55 | outputs = list(outputs.values())[0] 56 | for index, score in enumerate(outputs[0]): 57 | 
print(f"Label: {index}, score: {score:.5f}") 58 | 59 | 60 | def main(): 61 | parser = argparse.ArgumentParser() 62 | parser.add_argument('model_filepath', type=pathlib.Path) 63 | parser.add_argument('image_filepath', type=pathlib.Path) 64 | 65 | args = parser.parse_args() 66 | 67 | model = Model(args.model_filepath) 68 | outputs = model.predict(args.image_filepath) 69 | print_outputs(outputs) 70 | 71 | 72 | if __name__ == '__main__': 73 | main() 74 | -------------------------------------------------------------------------------- /samples/python/onnx/object_detection_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import onnx 5 | import onnxruntime 6 | import PIL.Image 7 | 8 | PROB_THRESHOLD = 0.01 # Minimum probably to show results. 9 | 10 | 11 | class Model: 12 | def __init__(self, model_filepath): 13 | self.session = onnxruntime.InferenceSession(str(model_filepath)) 14 | assert len(self.session.get_inputs()) == 1 15 | self.input_shape = self.session.get_inputs()[0].shape[2:] 16 | self.input_name = self.session.get_inputs()[0].name 17 | self.input_type = {'tensor(float)': np.float32, 'tensor(float16)': np.float16}[self.session.get_inputs()[0].type] 18 | self.output_names = [o.name for o in self.session.get_outputs()] 19 | 20 | self.is_bgr = False 21 | self.is_range255 = False 22 | onnx_model = onnx.load(model_filepath) 23 | for metadata in onnx_model.metadata_props: 24 | if metadata.key == 'Image.BitmapPixelFormat' and metadata.value == 'Bgr8': 25 | self.is_bgr = True 26 | elif metadata.key == 'Image.NominalPixelRange' and metadata.value == 'NominalRange_0_255': 27 | self.is_range255 = True 28 | 29 | def predict(self, image_filepath): 30 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 31 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 32 | input_array = input_array.transpose((0, 3, 1, 2)) # => (N, C, H, W) 33 | if self.is_bgr: 34 | input_array = input_array[:, (2, 1, 0), :, :] 35 | if not self.is_range255: 36 | input_array = input_array / 255 # => Pixel values should be in range [0, 1] 37 | 38 | outputs = self.session.run(self.output_names, {self.input_name: input_array.astype(self.input_type)}) 39 | return {name: outputs[i] for i, name in enumerate(self.output_names)} 40 | 41 | 42 | def print_outputs(outputs): 43 | assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores']) 44 | for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]): 45 | if score > PROB_THRESHOLD: 46 | print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})") 47 | 48 | 49 | def main(): 50 | parser = argparse.ArgumentParser() 51 | parser.add_argument('model_filepath', type=pathlib.Path) 52 | parser.add_argument('image_filepath', type=pathlib.Path) 53 | 54 | args = parser.parse_args() 55 | 56 | model = Model(args.model_filepath) 57 | outputs = model.predict(args.image_filepath) 58 | print_outputs(outputs) 59 | 60 | 61 | if __name__ == '__main__': 62 | main() 63 | -------------------------------------------------------------------------------- /samples/python/tensorflow/object_detection_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import PIL.Image 5 | import tensorflow 6 | 7 | PROB_THRESHOLD = 
0.01 # Minimum probability to show results. 8 | 9 | 10 | class Model: 11 | def __init__(self, model_filepath): 12 | self.graph_def = tensorflow.compat.v1.GraphDef() 13 | self.graph_def.ParseFromString(model_filepath.read_bytes()) 14 | 15 | input_names, self.output_names = self._get_graph_inout(self.graph_def) 16 | assert len(input_names) == 1 and len(self.output_names) == 3 17 | self.input_name = input_names[0] 18 | self.input_shape = self._get_input_shape(self.graph_def, self.input_name) 19 | 20 | def predict(self, image_filepath): 21 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 22 | input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 23 | 24 | with tensorflow.compat.v1.Session() as sess: 25 | tensorflow.import_graph_def(self.graph_def, name='') 26 | out_tensors = [sess.graph.get_tensor_by_name(o + ':0') for o in self.output_names] 27 | outputs = sess.run(out_tensors, {self.input_name + ':0': input_array}) 28 | return {name: outputs[i][np.newaxis, ...] for i, name in enumerate(self.output_names)} 29 | 30 | @staticmethod 31 | def _get_graph_inout(graph_def): 32 | input_names = [] 33 | inputs_set = set() 34 | outputs_set = set() 35 | 36 | for node in graph_def.node: 37 | if node.op == 'Placeholder': 38 | input_names.append(node.name) 39 | 40 | for i in node.input: 41 | inputs_set.add(i.split(':')[0]) 42 | outputs_set.add(node.name) 43 | 44 | output_names = list(outputs_set - inputs_set) 45 | return input_names, output_names 46 | 47 | @staticmethod 48 | def _get_input_shape(graph_def, input_name): 49 | for node in graph_def.node: 50 | if node.name == input_name: 51 | return [dim.size for dim in node.attr['shape'].shape.dim][1:3] 52 | 53 | 54 | def print_outputs(outputs): 55 | assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores']) 56 | for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]): 57 | if score > PROB_THRESHOLD: 58 | print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})") 59 | 60 | 61 | def main(): 62 | parser = argparse.ArgumentParser() 63 | parser.add_argument('model_filepath', type=pathlib.Path) 64 | parser.add_argument('image_filepath', type=pathlib.Path) 65 | 66 | args = parser.parse_args() 67 | 68 | model = Model(args.model_filepath) 69 | outputs = model.predict(args.image_filepath) 70 | print_outputs(outputs) 71 | 72 | 73 | if __name__ == '__main__': 74 | main() 75 | -------------------------------------------------------------------------------- /samples/csharp/onnx/object_detection_s1/Main.cs: -------------------------------------------------------------------------------- 1 | using System.CommandLine; 2 | using System.IO; 3 | using Microsoft.ML.OnnxRuntime; 4 | using Microsoft.ML.OnnxRuntime.Tensors; 5 | using SixLabors.ImageSharp; 6 | using SixLabors.ImageSharp.PixelFormats; 7 | using SixLabors.ImageSharp.Processing; 8 | 9 | 10 | namespace CustomVision 11 | { 12 | class Program 13 | { 14 | static async Task Main(string[] args) 15 | { 16 | var modelFilepathArgument = new Argument<FileInfo>("model_filepath"); 17 | var imageFilepathArgument = new Argument<FileInfo>("image_filepath"); 18 | var command = new RootCommand 19 | { 20 | modelFilepathArgument, 21 | imageFilepathArgument 22 | }; 23 | 24 | command.SetHandler((FileInfo modelFilepath, FileInfo imageFilepath) => { 25 | Predict(modelFilepath, imageFilepath); 26 | }, modelFilepathArgument, imageFilepathArgument); 27 | 28 | await
command.InvokeAsync(args); 29 | } 30 | 31 | static void Predict(FileInfo modelFilepath, FileInfo imageFilepath) 32 | { 33 | var session = new InferenceSession(modelFilepath.ToString()); 34 | 35 | Tensor<float> tensor = LoadInputTensor(imageFilepath, 320); 36 | var inputs = new List<NamedOnnxValue> 37 | { 38 | NamedOnnxValue.CreateFromTensor("image_tensor", tensor) 39 | }; 40 | 41 | var resultsCollection = session.Run(inputs); 42 | var resultsDict = resultsCollection.ToDictionary(x => x.Name, x => x); 43 | var detectedBoxes = resultsDict["detected_boxes"].AsTensor<float>(); 44 | var detectedClasses = resultsDict["detected_classes"].AsTensor<long>(); 45 | var detectedScores = resultsDict["detected_scores"].AsTensor<float>(); 46 | 47 | var numBoxes = detectedClasses.Length; 48 | 49 | for (var i = 0; i < numBoxes; i++) { 50 | var score = detectedScores[0, i]; 51 | var classId = detectedClasses[0, i]; 52 | var x = detectedBoxes[0, i, 0]; 53 | var y = detectedBoxes[0, i, 1]; 54 | var x2 = detectedBoxes[0, i, 2]; 55 | var y2 = detectedBoxes[0, i, 3]; 56 | Console.WriteLine("Label: {0}, Probability: {1}, Box: ({2}, {3}) ({4}, {5})", classId, score, x, y, x2, y2); 57 | } 58 | } 59 | 60 | // Load an image file and create a RGB[0-255] tensor. 61 | static Tensor<float> LoadInputTensor(FileInfo imageFilepath, int imageSize) 62 | { 63 | var input = new DenseTensor<float>(new[] {1, 3, imageSize, imageSize}); 64 | using (var image = Image.Load<Rgb24>(imageFilepath.ToString())) 65 | { 66 | image.Mutate(x => x.Resize(imageSize, imageSize)); 67 | 68 | for (int y = 0; y < image.Height; y++) 69 | { 70 | Span<Rgb24> pixelSpan = image.GetPixelRowSpan(y); 71 | for (int x = 0; x < image.Width; x++) 72 | { 73 | input[0, 0, y, x] = pixelSpan[x].R; 74 | input[0, 1, y, x] = pixelSpan[x].G; 75 | input[0, 2, y, x] = pixelSpan[x].B; 76 | } 77 | } 78 | } 79 | return input; 80 | } 81 | } 82 | } -------------------------------------------------------------------------------- /samples/csharp/mlnet/classification_s1/Program.cs: -------------------------------------------------------------------------------- 1 | using Microsoft.ML; 2 | using Microsoft.ML.Data; 3 | using static Microsoft.ML.Transforms.Image.ImagePixelExtractingEstimator; 4 | using System.CommandLine; 5 | 6 | // Command Line Config 7 | var imagePathOption = new Option<string>(name:"--image_path",description:"The path of the image to run inference on."); 8 | var modelPathOption = new Option<string>(name:"--model_path", description:"The path of the ONNX model used for inferencing."); 9 | var labelPathOption = new Option<string>(name:"--labels_path",description: "The path of the labels file for your ONNX model."); 10 | 11 | var rootCommand = new RootCommand("Sample application to run inferencing using an ML.NET pipeline and an Azure Custom Vision ONNX model"); 12 | 13 | rootCommand.AddOption(imagePathOption); 14 | rootCommand.AddOption(modelPathOption); 15 | rootCommand.AddOption(labelPathOption); 16 | 17 | var CLIHandler = (string image, string model, string labels) => 18 | { 19 | 20 | if(image == null) 21 | { 22 | Console.WriteLine("Missing --image_path parameter"); 23 | return; 24 | } 25 | 26 | if(model == null) 27 | { 28 | Console.WriteLine("Missing --model_path parameter"); 29 | return; 30 | } 31 | 32 | if(labels == null) 33 | { 34 | Console.WriteLine("Missing --labels_path parameter"); 35 | return; 36 | } 37 | 38 | RunInference(image!, model!, labels!); 39 | }; 40 | 41 | rootCommand.SetHandler(CLIHandler, imagePathOption, modelPathOption, labelPathOption); 42 | 43 | await rootCommand.InvokeAsync(args); 44 | 45 | static void RunInference(string
imagePath, string modelPath, string labelPath) 46 | { 47 | // Initialize MLContext 48 | var ctx = new MLContext(); 49 | 50 | // Load labels 51 | var labels = File.ReadAllLines(labelPath); 52 | 53 | // Define inferencing pipeline 54 | var pipeline = 55 | ctx.Transforms.LoadImages(outputColumnName: "Image",null,inputColumnName:"ImagePath") 56 | .Append(ctx.Transforms.ResizeImages(outputColumnName: "ResizedImage", imageWidth: 300, imageHeight: 300, inputColumnName: "Image", resizing: Microsoft.ML.Transforms.Image.ImageResizingEstimator.ResizingKind.Fill)) 57 | .Append(ctx.Transforms.ExtractPixels(outputColumnName: "Pixels", inputColumnName: "ResizedImage", offsetImage:255, scaleImage: 1, orderOfExtraction: ColorsOrder.ABGR)) 58 | .Append(ctx.Transforms.CopyColumns(outputColumnName:"data", inputColumnName: "Pixels")) 59 | .Append(ctx.Transforms.ApplyOnnxModel(modelFile: modelPath)); 60 | 61 | // Define empty DataView to create inferencing pipeline 62 | var emptyDv = ctx.Data.LoadFromEnumerable(new ModelInput[] {}); 63 | 64 | // Build inferencing pipeline 65 | var model = pipeline.Fit(emptyDv); 66 | 67 | // (Optional) Save the fitted pipeline so it can be reloaded later 68 | ctx.Model.Save(model,emptyDv.Schema,"model.mlnet"); 69 | 70 | // Use inferencing pipeline 71 | var input = new ModelInput {ImagePath=imagePath}; 72 | var predictionEngine = ctx.Model.CreatePredictionEngine<ModelInput, ModelOutput>(model); 73 | var prediction = predictionEngine.Predict(input); 74 | var predictedLabel = prediction.GetPredictedLabel(); 75 | 76 | Console.WriteLine($"Image {imagePath} classified as {labels[predictedLabel]}"); 77 | } 78 | 79 | class ModelInput 80 | { 81 | public string ImagePath { get; set; } 82 | } 83 | 84 | class ModelOutput 85 | { 86 | [ColumnName("model_output")] 87 | public float[] Scores { get; set; } 88 | 89 | public int GetPredictedLabel() => 90 | Array.IndexOf(this.Scores, this.Scores.Max()); 91 | } -------------------------------------------------------------------------------- /samples/csharp/onnx/classification/Main.cs: -------------------------------------------------------------------------------- 1 | using System.CommandLine; 2 | using System.IO; 3 | using Microsoft.ML.OnnxRuntime; 4 | using Microsoft.ML.OnnxRuntime.Tensors; 5 | using SixLabors.ImageSharp; 6 | using SixLabors.ImageSharp.PixelFormats; 7 | using SixLabors.ImageSharp.Processing; 8 | 9 | 10 | namespace CustomVision 11 | { 12 | class Program 13 | { 14 | static async Task Main(string[] args) 15 | { 16 | var modelFilepathArgument = new Argument<FileInfo>("model_filepath"); 17 | var imageFilepathArgument = new Argument<FileInfo>("image_filepath"); 18 | var command = new RootCommand 19 | { 20 | modelFilepathArgument, 21 | imageFilepathArgument 22 | }; 23 | 24 | command.SetHandler((FileInfo modelFilepath, FileInfo imageFilepath) => { 25 | Predict(modelFilepath, imageFilepath); 26 | }, modelFilepathArgument, imageFilepathArgument); 27 | 28 | await command.InvokeAsync(args); 29 | } 30 | 31 | static void Predict(FileInfo modelFilepath, FileInfo imageFilepath) 32 | { 33 | var session = new InferenceSession(modelFilepath.ToString()); 34 | bool isBgr = session.ModelMetadata.CustomMetadataMap["Image.BitmapPixelFormat"] == "Bgr8"; 35 | bool isRange255 = session.ModelMetadata.CustomMetadataMap["Image.NominalPixelRange"] == "NominalRange_0_255"; 36 | var inputName = session.InputMetadata.Keys.First(); 37 | int inputSize = session.InputMetadata[inputName].Dimensions[2]; 38 | 39 | Tensor<float> tensor = LoadInputTensor(imageFilepath, inputSize, isBgr, isRange255); 40 | 41 | var inputs = new List<NamedOnnxValue> 42 | { 43 |
NamedOnnxValue.CreateFromTensor(inputName, tensor) 44 | }; 45 | 46 | var resultsCollection = session.Run(inputs); 47 | var outputs = resultsCollection.First().AsTensor<float>(); 48 | 49 | for (var i = 0; i < outputs.Length; i++) { 50 | Console.WriteLine("Label: {0}, Probability: {1}", i, outputs[0, i]); 51 | } 52 | } 53 | 54 | // Load an image file and create a tensor. 55 | static Tensor<float> LoadInputTensor(FileInfo imageFilepath, int imageSize, bool isBgr, bool isRange255) 56 | { 57 | var input = new DenseTensor<float>(new[] {1, 3, imageSize, imageSize}); 58 | using (var image = Image.Load<Rgb24>(imageFilepath.ToString())) 59 | { 60 | image.Mutate(x => x.Resize(imageSize, imageSize)); 61 | 62 | for (int y = 0; y < image.Height; y++) 63 | { 64 | Span<Rgb24> pixelSpan = image.GetPixelRowSpan(y); 65 | for (int x = 0; x < image.Width; x++) 66 | { 67 | if (isBgr) 68 | { 69 | input[0, 0, y, x] = pixelSpan[x].B; 70 | input[0, 1, y, x] = pixelSpan[x].G; 71 | input[0, 2, y, x] = pixelSpan[x].R; 72 | } 73 | else 74 | { 75 | input[0, 0, y, x] = pixelSpan[x].R; 76 | input[0, 1, y, x] = pixelSpan[x].G; 77 | input[0, 2, y, x] = pixelSpan[x].B; 78 | } 79 | 80 | if (!isRange255) 81 | { 82 | input[0, 0, y, x] = input[0, 0, y, x] / 255; 83 | input[0, 1, y, x] = input[0, 1, y, x] / 255; 84 | input[0, 2, y, x] = input[0, 2, y, x] / 255; 85 | } 86 | } 87 | } 88 | } 89 | return input; 90 | } 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to customvision-export-samples 2 | 3 | This project welcomes contributions and suggestions. Most contributions require you to agree to a 4 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us 5 | the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. 6 | 7 | When you submit a pull request, a CLA bot will automatically determine whether you need to provide 8 | a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions 9 | provided by the bot. You will only need to do this once across all repos using our CLA. 10 | 11 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 12 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or 13 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 14 | 15 | - [Code of Conduct](#coc) 16 | - [Issues and Bugs](#issue) 17 | - [Feature Requests](#feature) 18 | - [Submission Guidelines](#submit) 19 | 20 | ## Code of Conduct 21 | Help us keep this project open and inclusive. Please read and follow our [Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 22 | 23 | ## Found an Issue? 24 | If you find a bug in the source code or a mistake in the documentation, you can help us by 25 | [submitting an issue](#submit-issue) to the GitHub Repository. Even better, you can 26 | [submit a Pull Request](#submit-pr) with a fix. 27 | 28 | ## Want a Feature? 29 | You can *request* a new feature by [submitting an issue](#submit-issue) to the GitHub 30 | Repository. If you would like to *implement* a new feature, please submit an issue with 31 | a proposal for your work first, to be sure that we can use it. 32 | 33 | * **Small Features** can be crafted and directly [submitted as a Pull Request](#submit-pr).
34 | 35 | ## Submission Guidelines 36 | 37 | ### Submitting an Issue 38 | Before you submit an issue, search the archive; your question may already have been answered. 39 | 40 | If your issue appears to be a bug, and hasn't been reported, open a new issue. 41 | Help us maximize the effort we can spend fixing issues and adding new 42 | features, by not reporting duplicate issues. Providing the following information will increase the 43 | chances of your issue being dealt with quickly: 44 | 45 | * **Overview of the Issue** - if an error is being thrown a non-minified stack trace helps 46 | * **Version** - what version is affected (e.g. 0.1.2) 47 | * **Motivation for or Use Case** - explain what you are trying to do and why the current behavior is a bug for you 48 | * **Browsers and Operating System** - is this a problem with all browsers? 49 | * **Reproduce the Error** - provide a live example or an unambiguous set of steps 50 | * **Related Issues** - has a similar issue been reported before? 51 | * **Suggest a Fix** - if you can't fix the bug yourself, perhaps you can point to what might be 52 | causing the problem (line of code or commit) 53 | 54 | You can file new issues by providing the above information at the corresponding repository's issues link: https://github.com/Azure-Samples/customvision-export-samples/issues/new. 55 | 56 | ### Submitting a Pull Request (PR) 57 | Before you submit your Pull Request (PR) consider the following guidelines: 58 | 59 | * Search the repository (https://github.com/Azure-Samples/customvision-export-samples/pulls) for an open or closed PR 60 | that relates to your submission. You don't want to duplicate effort. 61 | 62 | * Make your changes in a new git fork: 63 | 64 | * Commit your changes using a descriptive commit message 65 | * Push your fork to GitHub: 66 | * In GitHub, create a pull request 67 | * If we suggest changes then: 68 | * Make the required updates. 69 | * Rebase your fork and force push to your GitHub repository (this will update your Pull Request): 70 | 71 | ```shell 72 | git rebase master -i 73 | git push -f 74 | ``` 75 | 76 | That's it! Thank you for your contribution! 77 | -------------------------------------------------------------------------------- /samples/python/openvino/object_detection_no_postprocess_s1/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import PIL.Image 5 | from openvino.inference_engine import IECore 6 | 7 | MAX_DETECTIONS = 64 # Max number of boxes to detect. 8 | PROB_THRESHOLD = 0.01 # Minimum probability to show results. 9 | IOU_THRESHOLD = 0.50 10 | 11 | 12 | class NonMaxSuppression: 13 | def __init__(self, max_detections, prob_threshold, iou_threshold): 14 | self.max_detections = max_detections 15 | self.prob_threshold = prob_threshold 16 | self.iou_threshold = iou_threshold 17 | 18 | def __call__(self, boxes, class_probs): 19 | """ 20 | Args: 21 | boxes (np.array with shape [-1, 4]): bounding boxes. [x, y, x2, y2] 22 | class_probs (np.array with shape [-1, num_classes]): probabilities for each box and class.
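Returns: (selected_boxes, selected_classes, selected_probs) NumPy arrays, ordered by descending probability.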
23 | """ 24 | assert len(boxes.shape) == 2 and boxes.shape[1] == 4 25 | assert len(class_probs.shape) == 2 26 | assert len(boxes) == len(class_probs) 27 | classes = np.argmax(class_probs, axis=1) 28 | probs = class_probs[np.arange(len(class_probs)), classes] 29 | valid_indices = probs > self.prob_threshold 30 | boxes, classes, probs = boxes[valid_indices, :], classes[valid_indices], probs[valid_indices] 31 | areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) 32 | selected_boxes = [] 33 | selected_classes = [] 34 | selected_probs = [] 35 | max_detections = min(self.max_detections, len(boxes)) 36 | 37 | while len(selected_boxes) < max_detections: 38 | i = np.argmax(probs) 39 | if probs[i] < self.prob_threshold: 40 | break 41 | 42 | # Save the selected prediction 43 | selected_boxes.append(boxes[i]) 44 | selected_classes.append(classes[i]) 45 | selected_probs.append(probs[i]) 46 | 47 | box = boxes[i] 48 | other_indices = np.concatenate((np.arange(i), np.arange(i + 1, len(boxes)))) 49 | other_boxes = boxes[other_indices] 50 | 51 | # Get overlap between the 'box' and 'other_boxes' 52 | xy = np.maximum(box[0:2], other_boxes[:, 0:2]) 53 | xy2 = np.minimum(box[2:4], other_boxes[:, 2:4]) 54 | wh = np.maximum(0, xy2 - xy) 55 | 56 | # Calculate Intersection Over Union (IOU) 57 | overlap_area = wh[:, 0] * wh[:, 1] 58 | iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area) 59 | 60 | # Find the overlapping predictions 61 | overlapping_indices = other_indices[np.where(iou > self.iou_threshold)[0]] 62 | overlapping_indices = np.append(overlapping_indices, i) 63 | 64 | probs[overlapping_indices] = 0 65 | 66 | return np.array(selected_boxes), np.array(selected_classes), np.array(selected_probs) 67 | 68 | 69 | class Model: 70 | OUTPUT_SIZE = 13 # Output Height/Width. 71 | 72 | def __init__(self, xml_filepath, bin_filepath): 73 | ie = IECore() 74 | net = ie.read_network(str(xml_filepath), str(bin_filepath)) 75 | assert len(net.input_info) == 1 and len(net.outputs) == 2 76 | self.nms = NonMaxSuppression(MAX_DETECTIONS, PROB_THRESHOLD, IOU_THRESHOLD) 77 | self.exec_net = ie.load_network(network=net, device_name='CPU') 78 | self.input_name = list(net.input_info.keys())[0] 79 | self.input_shape = net.input_info[self.input_name].input_data.shape[2:] 80 | assert set(net.outputs.keys()) == set(['raw_probs', 'raw_boxes']) 81 | 82 | def predict(self, image_filepath): 83 | # The model requires RGB[0-1] NCHW input. 
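# Unlike the post-processed exports, this model returns 'raw_probs' and 'raw_boxes'; _postprocess below converts the (center x, center y, width, height) boxes to corners and applies NonMaxSuppression.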
84 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 85 | input_array = np.array(image)[np.newaxis, :, :, :] 86 | input_array = input_array.transpose((0, 3, 1, 2)) # => (N, C, H, W) 87 | input_array = input_array / 255 # => Pixel values should be in range [0, 1] 88 | 89 | outputs = self.exec_net.infer(inputs={self.input_name: input_array}) 90 | return self._postprocess(outputs) 91 | 92 | def _postprocess(self, outputs): 93 | raw_probs = outputs['raw_probs'][0][:, 1:] 94 | raw_boxes = outputs['raw_boxes'][0] 95 | center_xy = raw_boxes[:, :2] 96 | wh = raw_boxes[:, 2:] 97 | xy = center_xy - wh / 2 98 | boxes = np.concatenate((xy, xy + wh), axis=1) 99 | detected_boxes, detected_classes, detected_scores = self.nms(boxes, raw_probs) 100 | return {'detected_boxes': detected_boxes.reshape(1, -1, 4), 'detected_classes': detected_classes.reshape(1, -1), 'detected_scores': detected_scores.reshape(1, -1)} 101 | 102 | 103 | def print_outputs(outputs): 104 | assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores']) 105 | for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]): 106 | if score > PROB_THRESHOLD: 107 | print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})") 108 | 109 | 110 | def main(): 111 | parser = argparse.ArgumentParser() 112 | parser.add_argument('xml_filepath', type=pathlib.Path) 113 | parser.add_argument('bin_filepath', type=pathlib.Path) 114 | parser.add_argument('image_filepath', type=pathlib.Path) 115 | 116 | args = parser.parse_args() 117 | 118 | model = Model(args.xml_filepath, args.bin_filepath) 119 | outputs = model.predict(args.image_filepath) 120 | print_outputs(outputs) 121 | 122 | 123 | if __name__ == '__main__': 124 | main() 125 | -------------------------------------------------------------------------------- /samples/csharp/mlnet/object_detection_s1/Program.cs: -------------------------------------------------------------------------------- 1 | using Microsoft.ML; 2 | using Microsoft.ML.Data; 3 | using System.CommandLine; 4 | 5 | // Command Line Config 6 | var imagePathOption = new Option<string>(name:"--image_path",description:"The path of the image to run inference on."); 7 | var modelPathOption = new Option<string>(name:"--model_path", description:"The path of the ONNX model used for inferencing."); 8 | var labelPathOption = new Option<string>(name:"--labels_path",description: "The path of the labels file for your ONNX model."); 9 | var confidenceOption = new Option<float>(name:"--confidence", description: "Value used to filter out bounding boxes with lower confidence.", getDefaultValue: () => 0.7f); 10 | 11 | var rootCommand = new RootCommand("Sample application to run inferencing using an ML.NET pipeline and an Azure Custom Vision ONNX model"); 12 | 13 | rootCommand.AddOption(imagePathOption); 14 | rootCommand.AddOption(modelPathOption); 15 | rootCommand.AddOption(labelPathOption); 16 | rootCommand.AddOption(confidenceOption); 17 | 18 | var CLIHandler = (string image, string model, string labels, float confidence) => 19 | { 20 | 21 | if(image == null) 22 | { 23 | Console.WriteLine("Missing --image_path parameter"); 24 | return; 25 | } 26 | 27 | if(model == null) 28 | { 29 | Console.WriteLine("Missing --model_path parameter"); 30 | return; 31 | } 32 | 33 | if(labels == null) 34 | { 35 | Console.WriteLine("Missing --labels_path parameter"); 36 | return; 37 | } 38 | 39 | RunInference(image!, model!,
labels!,confidence!); 40 | }; 41 | 42 | rootCommand.SetHandler(CLIHandler, imagePathOption, modelPathOption, labelPathOption,confidenceOption); 43 | 44 | await rootCommand.InvokeAsync(args); 45 | 46 | // Run Inference Helper Function 47 | static void RunInference(string imagePath, string modelPath, string labelPath,float confidence) 48 | { 49 | // Initialize MLContext 50 | var ctx = new MLContext(); 51 | 52 | // Load labels 53 | var labels = File.ReadAllLines(labelPath); 54 | 55 | // Define inferencing pipeline 56 | var pipeline = 57 | ctx.Transforms.LoadImages(outputColumnName: "Image",null,inputColumnName:"ImagePath") 58 | .Append(ctx.Transforms.ResizeImages(outputColumnName: "ResizedImage", imageWidth: 320, imageHeight: 320, inputColumnName: "Image", resizing: Microsoft.ML.Transforms.Image.ImageResizingEstimator.ResizingKind.Fill)) 59 | .Append(ctx.Transforms.ExtractPixels(outputColumnName: "Pixels", inputColumnName: "ResizedImage", offsetImage:255, scaleImage: 1)) 60 | .Append(ctx.Transforms.CopyColumns(outputColumnName:"image_tensor","Pixels")) 61 | .Append(ctx.Transforms.ApplyOnnxModel(modelFile: modelPath)); 62 | 63 | // Define empty DataView to create inferencing pipeline 64 | var emptyDv = ctx.Data.LoadFromEnumerable(new ModelInput[] {}); 65 | 66 | // Build inferencing pipeline 67 | var model = pipeline.Fit(emptyDv); 68 | 69 | // (Optional) Save the fitted pipeline so it can be reloaded later 70 | ctx.Model.Save(model,emptyDv.Schema,"model.mlnet"); 71 | 72 | // Use inferencing pipeline 73 | var input = new ModelInput {ImagePath=imagePath}; 74 | var predictionEngine = ctx.Model.CreatePredictionEngine<ModelInput, ModelOutput>(model); 75 | var prediction = predictionEngine.Predict(input); 76 | 77 | // Get bounding boxes 78 | var boundingBoxes = prediction.ToBoundingBoxes(labels, MLImage.CreateFromFile(input.ImagePath)); 79 | 80 | // Get top bounding boxes based on probability 81 | var topBoundingBoxes = 82 | boundingBoxes 83 | .Where(x => x.Probability > confidence) 84 | .OrderByDescending(x => x.Probability) 85 | .ToArray(); 86 | 87 | // Print out bounding box information to the console 88 | foreach(var b in topBoundingBoxes) 89 | { 90 | Console.WriteLine(b); 91 | } 92 | } 93 | 94 | class ModelInput 95 | { 96 | public string? ImagePath { get; set; } 97 | } 98 | 99 | class ModelOutput 100 | { 101 | 102 | [ColumnName("detected_boxes")] 103 | [VectorType()] 104 | public float[]? Boxes {get;set;} 105 | 106 | [ColumnName("detected_scores")] 107 | [VectorType()] 108 | public float[]? Scores {get;set;} 109 | 110 | [ColumnName("detected_classes")] 111 | [VectorType()] 112 | public long[]? Classes {get;set;} 113 | 114 | // Helper functions 115 | 116 | public BoundingBox[] ToBoundingBoxes(string[] labels, MLImage originalImage) 117 | { 118 | var bboxCoordinates = 119 | this.Boxes! 120 | .Chunk(4) 121 | .ToArray(); 122 | 123 | var boundingBoxes = 124 | bboxCoordinates 125 | .Select((coordinates,idx) => 126 | new BoundingBox 127 | { 128 | TopLeft=(X: coordinates[0] * originalImage.Width, Y: coordinates[1] * originalImage.Height), 129 | BottomRight=(X: coordinates[2] * originalImage.Width, Y: coordinates[3] * originalImage.Height), 130 | PredictedClass=labels[this.Classes![idx]], 131 | Probability=this.Scores![idx] 132 | }) 133 | .ToArray(); 134 | 135 | return boundingBoxes; 136 | } 137 | } 138 | 139 | public class BoundingBox 140 | { 141 | public (float X, float Y) TopLeft {get;set;} 142 | public (float X, float Y) BottomRight {get;set;} 143 | public string?
PredictedClass {get;set;} 144 | public float Probability {get;set;} 145 | 146 | public override string ToString() => 147 | $"Top Left (x,y): ({TopLeft.X},{TopLeft.Y})\nBottom Right (x,y): ({BottomRight.X},{BottomRight.Y})\nClass: {PredictedClass}\nProbability: {Probability}"; 148 | } 149 | -------------------------------------------------------------------------------- /samples/python/tensorflow_saved_model/object_detection/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import PIL.Image 5 | import tensorflow 6 | 7 | MAX_DETECTIONS = 64 # Max number of boxes to detect. 8 | PROB_THRESHOLD = 0.01 # Minimum probability to show results. 9 | IOU_THRESHOLD = 0.45 10 | 11 | 12 | class NonMaxSuppression: 13 | def __init__(self, max_detections, prob_threshold, iou_threshold): 14 | self.max_detections = max_detections 15 | self.prob_threshold = prob_threshold 16 | self.iou_threshold = iou_threshold 17 | 18 | def __call__(self, boxes, class_probs): 19 | """ 20 | Args: 21 | boxes (np.array with shape [-1, 4]): bounding boxes. [x, y, x2, y2] 22 | class_probs (np.array with shape [-1, num_classes]): probabilities for each box and class. 23 | """ 24 | assert len(boxes.shape) == 2 and boxes.shape[1] == 4 25 | assert len(class_probs.shape) == 2 26 | assert len(boxes) == len(class_probs) 27 | classes = np.argmax(class_probs, axis=1) 28 | probs = class_probs[np.arange(len(class_probs)), classes] 29 | valid_indices = probs > self.prob_threshold 30 | boxes, classes, probs = boxes[valid_indices, :], classes[valid_indices], probs[valid_indices] 31 | areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) 32 | selected_boxes = [] 33 | selected_classes = [] 34 | selected_probs = [] 35 | max_detections = min(self.max_detections, len(boxes)) 36 | 37 | while len(selected_boxes) < max_detections: 38 | i = np.argmax(probs) 39 | if probs[i] < self.prob_threshold: 40 | break 41 | 42 | # Save the selected prediction 43 | selected_boxes.append(boxes[i]) 44 | selected_classes.append(classes[i]) 45 | selected_probs.append(probs[i]) 46 | 47 | box = boxes[i] 48 | other_indices = np.concatenate((np.arange(i), np.arange(i + 1, len(boxes)))) 49 | other_boxes = boxes[other_indices] 50 | 51 | # Get overlap between the 'box' and 'other_boxes' 52 | xy = np.maximum(box[0:2], other_boxes[:, 0:2]) 53 | xy2 = np.minimum(box[2:4], other_boxes[:, 2:4]) 54 | wh = np.maximum(0, xy2 - xy) 55 | 56 | # Calculate Intersection Over Union (IOU) 57 | overlap_area = wh[:, 0] * wh[:, 1] 58 | iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area) 59 | 60 | # Find the overlapping predictions 61 | overlapping_indices = other_indices[np.where(iou > self.iou_threshold)[0]] 62 | overlapping_indices = np.append(overlapping_indices, i) 63 | 64 | probs[overlapping_indices] = 0 65 | 66 | return np.array(selected_boxes), np.array(selected_classes), np.array(selected_probs) 67 | 68 | 69 | class Model: 70 | ANCHORS = np.array([[0.573, 0.677], [1.87, 2.06], [3.34, 5.47], [7.88, 3.53], [9.77, 9.17]]) 71 | 72 | def __init__(self, model_dirpath): 73 | model = tensorflow.saved_model.load(str(model_dirpath)) 74 | self.serve = model.signatures['serving_default'] 75 | self.input_shape = self.serve.inputs[0].shape[1:3] 76 | self.nms = NonMaxSuppression(MAX_DETECTIONS, PROB_THRESHOLD, IOU_THRESHOLD) 77 | 78 | def predict(self, image_filepath): 79 | image = PIL.Image.open(image_filepath).resize(self.input_shape) 80 | input_array =
np.array(image, dtype=np.float32)[np.newaxis, :, :, :] 81 | input_array = input_array[:, :, :, (2, 1, 0)] # => BGR 82 | 83 | input_tensor = tensorflow.convert_to_tensor(input_array) 84 | outputs = self.serve(input_tensor) 85 | return self._postprocess(list(outputs.values())[0].numpy(), self.ANCHORS) 86 | 87 | def _postprocess(self, outputs, anchors): 88 | assert len(outputs.shape) == 4 and outputs.shape[0] == 1 89 | outputs = outputs[0] 90 | assert len(anchors.shape) == 2 91 | num_anchors = anchors.shape[0] 92 | height, width, channels = outputs.shape 93 | assert channels % num_anchors == 0 94 | num_classes = channels // num_anchors - 5 95 | outputs = outputs.reshape((height, width, num_anchors, -1)) 96 | 97 | x = (outputs[..., 0] + np.arange(width)[np.newaxis, :, np.newaxis]) / width 98 | y = (outputs[..., 1] + np.arange(height)[:, np.newaxis, np.newaxis]) / height 99 | w = np.exp(outputs[..., 2]) * anchors[:, 0][np.newaxis, np.newaxis, :] / width 100 | h = np.exp(outputs[..., 3]) * anchors[:, 1][np.newaxis, np.newaxis, :] / height 101 | 102 | x = x - w / 2 103 | y = y - h / 2 104 | boxes = np.stack((x, y, x + w, y + h), axis=-1).reshape(-1, 4) 105 | 106 | objectness = self._logistic(outputs[..., 4, np.newaxis]) 107 | class_probs = outputs[..., 5:] 108 | class_probs = np.exp(class_probs - np.amax(class_probs, axis=3)[..., np.newaxis]) 109 | class_probs = (class_probs / np.sum(class_probs, axis=3)[..., np.newaxis] * objectness).reshape(-1, num_classes) 110 | 111 | detected_boxes, detected_classes, detected_scores = self.nms(boxes, class_probs) 112 | return {'detected_boxes': detected_boxes.reshape(1, -1, 4), 'detected_classes': detected_classes.reshape(1, -1), 'detected_scores': detected_scores.reshape(1, -1)} 113 | 114 | def _logistic(self, x): 115 | return np.where(x > 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x))) 116 | 117 | 118 | def print_outputs(outputs): 119 | assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores']) 120 | for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]): 121 | if score > PROB_THRESHOLD: 122 | print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})") 123 | 124 | 125 | def main(): 126 | parser = argparse.ArgumentParser() 127 | parser.add_argument('model_dirpath', type=pathlib.Path) 128 | parser.add_argument('image_filepath', type=pathlib.Path) 129 | 130 | args = parser.parse_args() 131 | 132 | if args.model_dirpath.is_file(): 133 | args.model_dirpath = args.model_dirpath.parent 134 | 135 | model = Model(args.model_dirpath) 136 | outputs = model.predict(args.image_filepath) 137 | print_outputs(outputs) 138 | 139 | 140 | if __name__ == '__main__': 141 | main() 142 | -------------------------------------------------------------------------------- /samples/python/coreml/object_detection/predict.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import pathlib 3 | import numpy as np 4 | import coremltools 5 | import PIL.Image 6 | 7 | MAX_DETECTIONS = 64 # Max number of boxes to detect. 8 | PROB_THRESHOLD = 0.01 # Minimum probably to show results. 
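# Usage sketch (assumed; the file names below are placeholders, not part of the sample):
#   model = Model('exported_model.mlmodel')
#   print_outputs(model.predict('test_image.jpg'))
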
MAX_DETECTIONS = 64  # Max number of boxes to detect.
PROB_THRESHOLD = 0.01  # Minimum probability to show results.
IOU_THRESHOLD = 0.45


class NonMaxSuppression:
    def __init__(self, max_detections, prob_threshold, iou_threshold):
        self.max_detections = max_detections
        self.prob_threshold = prob_threshold
        self.iou_threshold = iou_threshold

    def __call__(self, boxes, class_probs):
        """
        Args:
            boxes (np.array with shape [-1, 4]): bounding boxes. [x, y, x2, y2]
            class_probs (np.array with shape [-1, num_classes]): probabilities for each box and class.
        """
        assert len(boxes.shape) == 2 and boxes.shape[1] == 4
        assert len(class_probs.shape) == 2
        assert len(boxes) == len(class_probs)
        classes = np.argmax(class_probs, axis=1)
        probs = class_probs[np.arange(len(class_probs)), classes]
        valid_indices = probs > self.prob_threshold
        boxes, classes, probs = boxes[valid_indices, :], classes[valid_indices], probs[valid_indices]
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        selected_boxes = []
        selected_classes = []
        selected_probs = []
        max_detections = min(self.max_detections, len(boxes))

        while len(selected_boxes) < max_detections:
            i = np.argmax(probs)
            if probs[i] < self.prob_threshold:
                break

            # Save the selected prediction
            selected_boxes.append(boxes[i])
            selected_classes.append(classes[i])
            selected_probs.append(probs[i])

            box = boxes[i]
            other_indices = np.concatenate((np.arange(i), np.arange(i + 1, len(boxes))))
            other_boxes = boxes[other_indices]

            # Get overlap between the 'box' and 'other_boxes'
            xy = np.maximum(box[0:2], other_boxes[:, 0:2])
            xy2 = np.minimum(box[2:4], other_boxes[:, 2:4])
            wh = np.maximum(0, xy2 - xy)

            # Calculate Intersection Over Union (IOU)
            overlap_area = wh[:, 0] * wh[:, 1]
            iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area)

            # Find the overlapping predictions
            overlapping_indices = other_indices[np.where(iou > self.iou_threshold)[0]]
            overlapping_indices = np.append(overlapping_indices, i)

            probs[overlapping_indices] = 0

        return np.array(selected_boxes), np.array(selected_classes), np.array(selected_probs)


class Model:
    ANCHORS = np.array([[0.573, 0.677], [1.87, 2.06], [3.34, 5.47], [7.88, 3.53], [9.77, 9.17]])

    def __init__(self, model_filepath):
        self.model = coremltools.models.MLModel(str(model_filepath))
        spec = self.model.get_spec()
        assert len(spec.description.input) == 1
        input_description = spec.description.input[0]
        self.input_name = input_description.name
        self.input_shape = (input_description.type.imageType.width, input_description.type.imageType.height)
        assert len(spec.description.output) == 1
        self.output_name = spec.description.output[0].name
        self.nms = NonMaxSuppression(MAX_DETECTIONS, PROB_THRESHOLD, IOU_THRESHOLD)

    def predict(self, image_filepath):
        image = PIL.Image.open(image_filepath).resize(self.input_shape)
        outputs = self.model.predict({self.input_name: image})
        return self._postprocess(outputs[self.output_name], self.ANCHORS)

    def _postprocess(self, outputs, anchors):
        assert len(outputs.shape) == 5 and outputs.shape[0] == 1
        outputs = outputs[0][0].transpose((1, 2, 0))
        assert len(anchors.shape) == 2
        num_anchors = anchors.shape[0]
        height, width, channels = outputs.shape
        assert channels % num_anchors == 0
        num_classes = channels // num_anchors - 5
        outputs = outputs.reshape((height, width, num_anchors, -1))

        x = (outputs[..., 0] + np.arange(width)[np.newaxis, :, np.newaxis]) / width
        y = (outputs[..., 1] + np.arange(height)[:, np.newaxis, np.newaxis]) / height
        w = np.exp(outputs[..., 2]) * anchors[:, 0][np.newaxis, np.newaxis, :] / width
        h = np.exp(outputs[..., 3]) * anchors[:, 1][np.newaxis, np.newaxis, :] / height

        x = x - w / 2
        y = y - h / 2
        boxes = np.stack((x, y, x + w, y + h), axis=-1).reshape(-1, 4)

        objectness = self._logistic(outputs[..., 4, np.newaxis])
        class_probs = outputs[..., 5:]
        class_probs = np.exp(class_probs - np.amax(class_probs, axis=3)[..., np.newaxis])
        class_probs = (class_probs / np.sum(class_probs, axis=3)[..., np.newaxis] * objectness).reshape(-1, num_classes)

        detected_boxes, detected_classes, detected_scores = self.nms(boxes, class_probs)
        return {'detected_boxes': detected_boxes.reshape(1, -1, 4), 'detected_classes': detected_classes.reshape(1, -1), 'detected_scores': detected_scores.reshape(1, -1)}

    def _logistic(self, x):
        return np.where(x > 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x)))


def print_outputs(outputs):
    assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores'])
    for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]):
        if score > PROB_THRESHOLD:
            print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('model_filepath', type=pathlib.Path)
    parser.add_argument('image_filepath', type=pathlib.Path)

    args = parser.parse_args()

    model = Model(args.model_filepath)
    outputs = model.predict(args.image_filepath)
    print_outputs(outputs)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/samples/python/openvino/object_detection_no_postprocess/predict.py:
--------------------------------------------------------------------------------
import argparse
import pathlib
import xml.etree.ElementTree as ET
import numpy as np
import PIL.Image
from openvino.inference_engine import IECore

MAX_DETECTIONS = 64  # Max number of boxes to detect.
PROB_THRESHOLD = 0.01  # Minimum probability to show results.
IOU_THRESHOLD = 0.45


class NonMaxSuppression:
    def __init__(self, max_detections, prob_threshold, iou_threshold):
        self.max_detections = max_detections
        self.prob_threshold = prob_threshold
        self.iou_threshold = iou_threshold

    def __call__(self, boxes, class_probs):
        """
        Args:
            boxes (np.array with shape [-1, 4]): bounding boxes. [x, y, x2, y2]
            class_probs (np.array with shape [-1, num_classes]): probabilities for each box and class.
24 | """ 25 | assert len(boxes.shape) == 2 and boxes.shape[1] == 4 26 | assert len(class_probs.shape) == 2 27 | assert len(boxes) == len(class_probs) 28 | classes = np.argmax(class_probs, axis=1) 29 | probs = class_probs[np.arange(len(class_probs)), classes] 30 | valid_indices = probs > self.prob_threshold 31 | boxes, classes, probs = boxes[valid_indices, :], classes[valid_indices], probs[valid_indices] 32 | areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) 33 | selected_boxes = [] 34 | selected_classes = [] 35 | selected_probs = [] 36 | max_detections = min(self.max_detections, len(boxes)) 37 | 38 | while len(selected_boxes) < max_detections: 39 | i = np.argmax(probs) 40 | if probs[i] < self.prob_threshold: 41 | break 42 | 43 | # Save the selected prediction 44 | selected_boxes.append(boxes[i]) 45 | selected_classes.append(classes[i]) 46 | selected_probs.append(probs[i]) 47 | 48 | box = boxes[i] 49 | other_indices = np.concatenate((np.arange(i), np.arange(i + 1, len(boxes)))) 50 | other_boxes = boxes[other_indices] 51 | 52 | # Get overlap between the 'box' and 'other_boxes' 53 | xy = np.maximum(box[0:2], other_boxes[:, 0:2]) 54 | xy2 = np.minimum(box[2:4], other_boxes[:, 2:4]) 55 | wh = np.maximum(0, xy2 - xy) 56 | 57 | # Calculate Intersection Over Union (IOU) 58 | overlap_area = wh[:, 0] * wh[:, 1] 59 | iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area) 60 | 61 | # Find the overlapping predictions 62 | overlapping_indices = other_indices[np.where(iou > self.iou_threshold)[0]] 63 | overlapping_indices = np.append(overlapping_indices, i) 64 | 65 | probs[overlapping_indices] = 0 66 | 67 | return np.array(selected_boxes), np.array(selected_classes), np.array(selected_probs) 68 | 69 | 70 | class Model: 71 | OUTPUT_SIZE = 13 # Output Height/Width. 72 | 73 | def __init__(self, xml_filepath, bin_filepath): 74 | ie = IECore() 75 | net = ie.read_network(str(xml_filepath), str(bin_filepath)) 76 | assert len(net.input_info) == 1 and len(net.outputs) == 1 77 | self.nms = NonMaxSuppression(MAX_DETECTIONS, PROB_THRESHOLD, IOU_THRESHOLD) 78 | self.exec_net = ie.load_network(network=net, device_name='CPU') 79 | self.input_name = list(net.input_info.keys())[0] 80 | self.input_shape = net.input_info[self.input_name].input_data.shape[2:] 81 | self.output_name = list(net.outputs.keys())[0] 82 | self.anchors = self._extract_anchors_from_network(xml_filepath) 83 | 84 | def predict(self, image_filepath): 85 | # The model requires RGB[0-1] NCHW input. 
        image = PIL.Image.open(image_filepath).resize(self.input_shape)
        input_array = np.array(image)[np.newaxis, :, :, :]
        input_array = input_array.transpose((0, 3, 1, 2))  # => (N, C, H, W)
        input_array = input_array / 255  # => Pixel values should be in range [0, 1]

        outputs = self.exec_net.infer(inputs={self.input_name: input_array})
        return self._postprocess(outputs[self.output_name], self.anchors)

    @staticmethod
    def _extract_anchors_from_network(xml_filepath):
        root = ET.parse(xml_filepath).getroot()
        for layer in root.find('layers').findall('layer'):
            if layer.get('type') == 'RegionYolo':
                anchors = [float(a) for a in layer.find('data').get('anchors').split(',')]
                return np.array(anchors, dtype=float).reshape(-1, 2)

        raise RuntimeError("RegionYolo layer is not found.")

    def _postprocess(self, outputs, anchors):
        assert len(outputs.shape) == 2 and outputs.shape[0] == 1
        outputs = outputs.reshape(-1, self.OUTPUT_SIZE, self.OUTPUT_SIZE).transpose((1, 2, 0))
        assert len(anchors.shape) == 2
        num_anchors = anchors.shape[0]
        height, width, channels = outputs.shape
        assert channels % num_anchors == 0
        num_classes = channels // num_anchors - 5
        outputs = outputs.reshape((height, width, num_anchors, -1))

        x = (outputs[..., 0] + np.arange(width)[np.newaxis, :, np.newaxis]) / width
        y = (outputs[..., 1] + np.arange(height)[:, np.newaxis, np.newaxis]) / height
        w = np.exp(outputs[..., 2]) * anchors[:, 0][np.newaxis, np.newaxis, :] / width
        h = np.exp(outputs[..., 3]) * anchors[:, 1][np.newaxis, np.newaxis, :] / height

        x = x - w / 2
        y = y - h / 2
        boxes = np.stack((x, y, x + w, y + h), axis=-1).reshape(-1, 4)
        class_probs = (outputs[..., 5:] * outputs[..., 4, np.newaxis]).reshape(-1, num_classes)

        detected_boxes, detected_classes, detected_scores = self.nms(boxes, class_probs)
        return {'detected_boxes': detected_boxes.reshape(1, -1, 4), 'detected_classes': detected_classes.reshape(1, -1, 1), 'detected_scores': detected_scores.reshape(1, -1, 1)}


def print_outputs(outputs):
    assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores'])
    for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]):
        if score[0] > PROB_THRESHOLD:
            print(f"Label: {class_id[0]}, Probability: {score[0]:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('xml_filepath', type=pathlib.Path)
    parser.add_argument('bin_filepath', type=pathlib.Path)
    parser.add_argument('image_filepath', type=pathlib.Path)

    args = parser.parse_args()

    model = Model(args.xml_filepath, args.bin_filepath)
    outputs = model.predict(args.image_filepath)
    print_outputs(outputs)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/samples/python/onnx/object_detection/predict.py:
--------------------------------------------------------------------------------
import argparse
import pathlib
import numpy as np
import onnx
import onnxruntime
import PIL.Image

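# Usage sketch (assumed; the file names below are placeholders, not part of the sample):
#   model = Model('model.onnx')
#   print_outputs(model.predict('test_image.jpg'))
# Model reads the ONNX metadata (Image.BitmapPixelFormat, Image.NominalPixelRange)
# to decide whether BGR channel order and a [0, 255] pixel range are expected.
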
MAX_DETECTIONS = 64  # Max number of boxes to detect.
PROB_THRESHOLD = 0.01  # Minimum probability to show results.
IOU_THRESHOLD = 0.45


class NonMaxSuppression:
    def __init__(self, max_detections, prob_threshold, iou_threshold):
        self.max_detections = max_detections
        self.prob_threshold = prob_threshold
        self.iou_threshold = iou_threshold

    def __call__(self, boxes, class_probs):
        """
        Args:
            boxes (np.array with shape [-1, 4]): bounding boxes. [x, y, x2, y2]
            class_probs (np.array with shape [-1, num_classes]): probabilities for each box and class.
        """
        assert len(boxes.shape) == 2 and boxes.shape[1] == 4
        assert len(class_probs.shape) == 2
        assert len(boxes) == len(class_probs)
        classes = np.argmax(class_probs, axis=1)
        probs = class_probs[np.arange(len(class_probs)), classes]
        valid_indices = probs > self.prob_threshold
        boxes, classes, probs = boxes[valid_indices, :], classes[valid_indices], probs[valid_indices]
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        selected_boxes = []
        selected_classes = []
        selected_probs = []
        max_detections = min(self.max_detections, len(boxes))

        while len(selected_boxes) < max_detections:
            i = np.argmax(probs)
            if probs[i] < self.prob_threshold:
                break

            # Save the selected prediction
            selected_boxes.append(boxes[i])
            selected_classes.append(classes[i])
            selected_probs.append(probs[i])

            box = boxes[i]
            other_indices = np.concatenate((np.arange(i), np.arange(i + 1, len(boxes))))
            other_boxes = boxes[other_indices]

            # Get overlap between the 'box' and 'other_boxes'
            xy = np.maximum(box[0:2], other_boxes[:, 0:2])
            xy2 = np.minimum(box[2:4], other_boxes[:, 2:4])
            wh = np.maximum(0, xy2 - xy)

            # Calculate Intersection Over Union (IOU)
            overlap_area = wh[:, 0] * wh[:, 1]
            iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area)

            # Find the overlapping predictions
            overlapping_indices = other_indices[np.where(iou > self.iou_threshold)[0]]
            overlapping_indices = np.append(overlapping_indices, i)

            probs[overlapping_indices] = 0

        return np.array(selected_boxes), np.array(selected_classes), np.array(selected_probs)


class Model:
    ANCHORS = np.array([[0.573, 0.677], [1.87, 2.06], [3.34, 5.47], [7.88, 3.53], [9.77, 9.17]])

    def __init__(self, model_filepath):
        self.session = onnxruntime.InferenceSession(str(model_filepath))
        assert len(self.session.get_inputs()) == 1
        self.nms = NonMaxSuppression(MAX_DETECTIONS, PROB_THRESHOLD, IOU_THRESHOLD)
        self.input_shape = self.session.get_inputs()[0].shape[2:]
        self.input_name = self.session.get_inputs()[0].name
        self.input_type = {'tensor(float)': np.float32, 'tensor(float16)': np.float16}[self.session.get_inputs()[0].type]
        self.output_names = [o.name for o in self.session.get_outputs()]
        assert len(self.output_names) == 1

        self.is_bgr = False
        self.is_range255 = False
        onnx_model = onnx.load(model_filepath)
        for metadata in onnx_model.metadata_props:
            if metadata.key == 'Image.BitmapPixelFormat' and metadata.value == 'Bgr8':
                self.is_bgr = True
            elif metadata.key == 'Image.NominalPixelRange' and metadata.value == 'NominalRange_0_255':
                self.is_range255 = True

    def predict(self, image_filepath):
        image = PIL.Image.open(image_filepath).resize(self.input_shape)
        input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :]
        input_array = input_array.transpose((0, 3, 1, 2))  # => (N, C, H, W)
        if self.is_bgr:
            input_array = input_array[:, (2, 1, 0), :, :]
        if not self.is_range255:
            input_array = input_array / 255  # => Pixel values should be in range [0, 1]

        outputs = self.session.run(self.output_names, {self.input_name: input_array.astype(self.input_type)})
        return self._postprocess(outputs[0], self.ANCHORS)

    def _postprocess(self, outputs, anchors):
        assert len(outputs.shape) == 4 and outputs.shape[0] == 1
        outputs = outputs[0].transpose((1, 2, 0))
        assert len(anchors.shape) == 2
        num_anchors = anchors.shape[0]
        height, width, channels = outputs.shape
        assert channels % num_anchors == 0
        num_classes = channels // num_anchors - 5
        outputs = outputs.reshape((height, width, num_anchors, -1))

        x = (outputs[..., 0] + np.arange(width)[np.newaxis, :, np.newaxis]) / width
        y = (outputs[..., 1] + np.arange(height)[:, np.newaxis, np.newaxis]) / height
        w = np.exp(outputs[..., 2]) * anchors[:, 0][np.newaxis, np.newaxis, :] / width
        h = np.exp(outputs[..., 3]) * anchors[:, 1][np.newaxis, np.newaxis, :] / height

        x = x - w / 2
        y = y - h / 2
        boxes = np.stack((x, y, x + w, y + h), axis=-1).reshape(-1, 4)

        objectness = self._logistic(outputs[..., 4, np.newaxis])
        class_probs = outputs[..., 5:]
        class_probs = np.exp(class_probs - np.amax(class_probs, axis=3)[..., np.newaxis])
        class_probs = (class_probs / np.sum(class_probs, axis=3)[..., np.newaxis] * objectness).reshape(-1, num_classes)

        detected_boxes, detected_classes, detected_scores = self.nms(boxes, class_probs)
        return {'detected_boxes': detected_boxes.reshape(1, -1, 4), 'detected_classes': detected_classes.reshape(1, -1), 'detected_scores': detected_scores.reshape(1, -1)}

    def _logistic(self, x):
        return np.where(x > 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x)))


def print_outputs(outputs):
    assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores'])
    for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]):
        if score > PROB_THRESHOLD:
            print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('model_filepath', type=pathlib.Path)
    parser.add_argument('image_filepath', type=pathlib.Path)

    args = parser.parse_args()

    model = Model(args.model_filepath)
    outputs = model.predict(args.image_filepath)
    print_outputs(outputs)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/samples/python/tensorflow_lite/object_detection/predict.py:
--------------------------------------------------------------------------------
import argparse
import pathlib
import numpy as np
import PIL.Image
import tensorflow

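# Usage sketch (assumed; the file names below are placeholders, not part of the sample):
#   python predict.py model.tflite test_image.jpg
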
MAX_DETECTIONS = 64  # Max number of boxes to detect.
PROB_THRESHOLD = 0.01  # Minimum probability to show results.
IOU_THRESHOLD = 0.45


class NonMaxSuppression:
    def __init__(self, max_detections, prob_threshold, iou_threshold):
        self.max_detections = max_detections
        self.prob_threshold = prob_threshold
        self.iou_threshold = iou_threshold

    def __call__(self, boxes, class_probs):
        """
        Args:
            boxes (np.array with shape [-1, 4]): bounding boxes. [x, y, x2, y2]
            class_probs (np.array with shape [-1, num_classes]): probabilities for each box and class.
        """
        assert len(boxes.shape) == 2 and boxes.shape[1] == 4
        assert len(class_probs.shape) == 2
        assert len(boxes) == len(class_probs)
        classes = np.argmax(class_probs, axis=1)
        probs = class_probs[np.arange(len(class_probs)), classes]
        valid_indices = probs > self.prob_threshold
        boxes, classes, probs = boxes[valid_indices, :], classes[valid_indices], probs[valid_indices]
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        selected_boxes = []
        selected_classes = []
        selected_probs = []
        max_detections = min(self.max_detections, len(boxes))

        while len(selected_boxes) < max_detections:
            i = np.argmax(probs)
            if probs[i] < self.prob_threshold:
                break

            # Save the selected prediction
            selected_boxes.append(boxes[i])
            selected_classes.append(classes[i])
            selected_probs.append(probs[i])

            box = boxes[i]
            other_indices = np.concatenate((np.arange(i), np.arange(i + 1, len(boxes))))
            other_boxes = boxes[other_indices]

            # Get overlap between the 'box' and 'other_boxes'
            xy = np.maximum(box[0:2], other_boxes[:, 0:2])
            xy2 = np.minimum(box[2:4], other_boxes[:, 2:4])
            wh = np.maximum(0, xy2 - xy)

            # Calculate Intersection Over Union (IOU)
            overlap_area = wh[:, 0] * wh[:, 1]
            iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area)

            # Find the overlapping predictions
            overlapping_indices = other_indices[np.where(iou > self.iou_threshold)[0]]
            overlapping_indices = np.append(overlapping_indices, i)

            probs[overlapping_indices] = 0

        return np.array(selected_boxes), np.array(selected_classes), np.array(selected_probs)


class Model:
    ANCHORS = np.array([[0.573, 0.677], [1.87, 2.06], [3.34, 5.47], [7.88, 3.53], [9.77, 9.17]])

    def __init__(self, model_filepath):
        self.interpreter = tensorflow.lite.Interpreter(model_path=str(model_filepath))
        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        assert len(self.input_details) == 1 and len(self.output_details) == 1
        self.input_shape = self.input_details[0]['shape'][1:3]
        self.nms = NonMaxSuppression(MAX_DETECTIONS, PROB_THRESHOLD, IOU_THRESHOLD)

    def predict(self, image_filepath):
        image = PIL.Image.open(image_filepath).resize(self.input_shape)
        input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :]
        input_array = input_array[:, :, :, (2, 1, 0)]  # => BGR

        self.interpreter.set_tensor(self.input_details[0]['index'], input_array)
        self.interpreter.invoke()

        outputs = [self.interpreter.get_tensor(detail['index']) for detail in self.output_details]
        return self._postprocess(outputs[0], self.ANCHORS)

    def _postprocess(self, outputs, anchors):
        assert len(outputs.shape) == 4 and outputs.shape[0] == 1
        outputs = outputs[0]
        assert len(anchors.shape) == 2
        num_anchors = anchors.shape[0]
        height, width, channels = outputs.shape
        assert channels % num_anchors == 0
        num_classes = channels // num_anchors - 5
        outputs = outputs.reshape((height, width, num_anchors, -1))

        x = (outputs[..., 0] + np.arange(width)[np.newaxis, :, np.newaxis]) / width
        y = (outputs[..., 1] + np.arange(height)[:, np.newaxis, np.newaxis]) / height
        w = np.exp(outputs[..., 2]) * anchors[:, 0][np.newaxis, np.newaxis, :] / width
        h = np.exp(outputs[..., 3]) * anchors[:, 1][np.newaxis, np.newaxis, :] / height

        x = x - w / 2
        y = y - h / 2
        boxes = np.stack((x, y, x + w, y + h), axis=-1).reshape(-1, 4)

        objectness = self._logistic(outputs[..., 4, np.newaxis])
        class_probs = outputs[..., 5:]
        class_probs = np.exp(class_probs - np.amax(class_probs, axis=3)[..., np.newaxis])
        class_probs = (class_probs / np.sum(class_probs, axis=3)[..., np.newaxis] * objectness).reshape(-1, num_classes)

        detected_boxes, detected_classes, detected_scores = self.nms(boxes, class_probs)
        return {'detected_boxes': detected_boxes.reshape(1, -1, 4), 'detected_classes': detected_classes.reshape(1, -1), 'detected_scores': detected_scores.reshape(1, -1)}

    def _logistic(self, x):
        return np.where(x > 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x)))


def print_outputs(outputs):
    assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores'])
    for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]):
        if score > PROB_THRESHOLD:
            print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('model_filepath', type=pathlib.Path)
    parser.add_argument('image_filepath', type=pathlib.Path)

    args = parser.parse_args()

    model = Model(args.model_filepath)
    outputs = model.predict(args.image_filepath)
    print_outputs(outputs)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/samples/python/tensorflow/object_detection/predict.py:
--------------------------------------------------------------------------------
import argparse
import pathlib
import numpy as np
import PIL.Image
import tensorflow

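# Usage sketch (assumed; the file names below are placeholders, not part of the sample):
#   python predict.py model.pb test_image.jpg
# Model expects a pathlib.Path (see main below); the frozen graph is read with read_bytes().
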
MAX_DETECTIONS = 64  # Max number of boxes to detect.
PROB_THRESHOLD = 0.01  # Minimum probability to show results.
IOU_THRESHOLD = 0.45


class NonMaxSuppression:
    def __init__(self, max_detections, prob_threshold, iou_threshold):
        self.max_detections = max_detections
        self.prob_threshold = prob_threshold
        self.iou_threshold = iou_threshold

    def __call__(self, boxes, class_probs):
        """
        Args:
            boxes (np.array with shape [-1, 4]): bounding boxes. [x, y, x2, y2]
            class_probs (np.array with shape [-1, num_classes]): probabilities for each box and class.
        """
        assert len(boxes.shape) == 2 and boxes.shape[1] == 4
        assert len(class_probs.shape) == 2
        assert len(boxes) == len(class_probs)
        classes = np.argmax(class_probs, axis=1)
        probs = class_probs[np.arange(len(class_probs)), classes]
        valid_indices = probs > self.prob_threshold
        boxes, classes, probs = boxes[valid_indices, :], classes[valid_indices], probs[valid_indices]
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        selected_boxes = []
        selected_classes = []
        selected_probs = []
        max_detections = min(self.max_detections, len(boxes))

        while len(selected_boxes) < max_detections:
            i = np.argmax(probs)
            if probs[i] < self.prob_threshold:
                break

            # Save the selected prediction
            selected_boxes.append(boxes[i])
            selected_classes.append(classes[i])
            selected_probs.append(probs[i])

            box = boxes[i]
            other_indices = np.concatenate((np.arange(i), np.arange(i + 1, len(boxes))))
            other_boxes = boxes[other_indices]

            # Get overlap between the 'box' and 'other_boxes'
            xy = np.maximum(box[0:2], other_boxes[:, 0:2])
            xy2 = np.minimum(box[2:4], other_boxes[:, 2:4])
            wh = np.maximum(0, xy2 - xy)

            # Calculate Intersection Over Union (IOU)
            overlap_area = wh[:, 0] * wh[:, 1]
            iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area)

            # Find the overlapping predictions
            overlapping_indices = other_indices[np.where(iou > self.iou_threshold)[0]]
            overlapping_indices = np.append(overlapping_indices, i)

            probs[overlapping_indices] = 0

        return np.array(selected_boxes), np.array(selected_classes), np.array(selected_probs)


class Model:
    ANCHORS = np.array([[0.573, 0.677], [1.87, 2.06], [3.34, 5.47], [7.88, 3.53], [9.77, 9.17]])

    def __init__(self, model_filepath):
        self.graph_def = tensorflow.compat.v1.GraphDef()
        self.graph_def.ParseFromString(model_filepath.read_bytes())

        self.nms = NonMaxSuppression(MAX_DETECTIONS, PROB_THRESHOLD, IOU_THRESHOLD)
        input_names, self.output_names = self._get_graph_inout(self.graph_def)
        assert len(input_names) == 1 and len(self.output_names) == 1
        self.input_name = input_names[0]
        self.input_shape = self._get_input_shape(self.graph_def, self.input_name)

    def predict(self, image_filepath):
        image = PIL.Image.open(image_filepath).resize(self.input_shape)
        input_array = np.array(image, dtype=np.float32)[np.newaxis, :, :, :]
        input_array = input_array[:, :, :, (2, 1, 0)]  # => BGR

        with tensorflow.compat.v1.Session() as sess:
            tensorflow.import_graph_def(self.graph_def, name='')
            out_tensors = [sess.graph.get_tensor_by_name(o + ':0') for o in self.output_names]
            outputs = sess.run(out_tensors, {self.input_name + ':0': input_array})
            return self._postprocess(outputs[0], self.ANCHORS)

    @staticmethod
    def _get_graph_inout(graph_def):
        input_names = []
        inputs_set = set()
        outputs_set = set()

        for node in graph_def.node:
            if node.op == 'Placeholder':
                input_names.append(node.name)

            for i in node.input:
                inputs_set.add(i.split(':')[0])
            outputs_set.add(node.name)

        output_names = list(outputs_set - inputs_set)
        return input_names, output_names

    @staticmethod
    def _get_input_shape(graph_def, input_name):
        for node in graph_def.node:
            if node.name == input_name:
                return [dim.size for dim in node.attr['shape'].shape.dim][1:3]

    def _postprocess(self, outputs, anchors):
        assert len(outputs.shape) == 4 and outputs.shape[0] == 1
        outputs = outputs[0]
        assert len(anchors.shape) == 2
        num_anchors = anchors.shape[0]
        height, width, channels = outputs.shape
        assert channels % num_anchors == 0
        num_classes = channels // num_anchors - 5
        outputs = outputs.reshape((height, width, num_anchors, -1))

        x = (outputs[..., 0] + np.arange(width)[np.newaxis, :, np.newaxis]) / width
        y = (outputs[..., 1] + np.arange(height)[:, np.newaxis, np.newaxis]) / height
        w = np.exp(outputs[..., 2]) * anchors[:, 0][np.newaxis, np.newaxis, :] / width
        h = np.exp(outputs[..., 3]) * anchors[:, 1][np.newaxis, np.newaxis, :] / height

        x = x - w / 2
        y = y - h / 2
        boxes = np.stack((x, y, x + w, y + h), axis=-1).reshape(-1, 4)

        objectness = self._logistic(outputs[..., 4, np.newaxis])
        class_probs = outputs[..., 5:]
        class_probs = np.exp(class_probs - np.amax(class_probs, axis=3)[..., np.newaxis])
        class_probs = (class_probs / np.sum(class_probs, axis=3)[..., np.newaxis] * objectness).reshape(-1, num_classes)

        detected_boxes, detected_classes, detected_scores = self.nms(boxes, class_probs)
        return {'detected_boxes': detected_boxes.reshape(1, -1, 4), 'detected_classes': detected_classes.reshape(1, -1), 'detected_scores': detected_scores.reshape(1, -1)}

    def _logistic(self, x):
        return np.where(x > 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x)))


def print_outputs(outputs):
    assert set(outputs.keys()) == set(['detected_boxes', 'detected_classes', 'detected_scores'])
    for box, class_id, score in zip(outputs['detected_boxes'][0], outputs['detected_classes'][0], outputs['detected_scores'][0]):
        if score > PROB_THRESHOLD:
            print(f"Label: {class_id}, Probability: {score:.5f}, box: ({box[0]:.5f}, {box[1]:.5f}) ({box[2]:.5f}, {box[3]:.5f})")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('model_filepath', type=pathlib.Path)
    parser.add_argument('image_filepath', type=pathlib.Path)

    args = parser.parse_args()

    model = Model(args.model_filepath)
    outputs = model.predict(args.image_filepath)
    print_outputs(outputs)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore

# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates

# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs

# Mono auto generated files
mono_crash.*

# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Ll]og/
[Ll]ogs/

# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/

# Visual Studio 2017 auto generated files
Generated\ Files/

# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*

# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml

# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c

# Benchmark Results
BenchmarkDotNet.Artifacts/

# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/

# StyleCop
StyleCopReport.xml

# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc

# Chutzpah Test files
_Chutzpah*

# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb

# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap

# Visual Studio Trace Files
*.e2e

# TFS 2012 Local Workspace
$tf/

# Guidance Automation Toolkit
*.gpState

# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user

# TeamCity is a build add-in
_TeamCity*

# DotCover is a Code Coverage Tool
*.dotCover

# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json

# Visual Studio code coverage results
*.coverage
*.coveragexml

# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*

# MightyMoose
*.mm.*
AutoTest.Net/

# Web workbench (sass)
.sass-cache/

# Installshield output folder
[Ee]xpress/

# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html

# Click-Once directory
publish/

# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj

# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/

# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets

# Microsoft Azure Build Output
csx/
*.build.csdef

# Microsoft Azure Emulator
ecf/
rcf/

# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload

# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/

# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs

# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk

# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/

# RIA/Silverlight projects
Generated_Code/

# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak

# SQL Server files
*.mdf
*.ldf
*.ndf

# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl

# Microsoft Fakes
FakesAssemblies/

# GhostDoc plugin setting file
*.GhostDoc.xml

# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/

# Visual Studio 6 build log
*.plg

# Visual Studio 6 workspace options file
*.opt

# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw

# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions

# Paket dependency manager
.paket/paket.exe
paket-files/

# FAKE - F# Make
.fake/

# CodeRush personal settings
.cr/personal

# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc

# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config

# Tabs Studio
*.tss

# Telerik's JustMock configuration file
*.jmconfig

# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs

# OpenCover UI analysis results
OpenCover/

# Azure Stream Analytics local run output
ASALocalRun/

# MSBuild Binary and Structured Log
*.binlog

# NVidia Nsight GPU debugger configuration file
*.nvuser

# MFractors (Xamarin productivity tool) working folder
.mfractor/

# Local History for Visual Studio
.localhistory/

# BeatPulse healthcheck temp database
healthchecksdb

# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/

# Ionide (cross platform F# VS Code tools) working folder
.ionide/
--------------------------------------------------------------------------------