├── .gitignore ├── .vscode └── settings.json ├── LICENSE ├── README.MD ├── assets ├── 5minTeaser.PNG ├── AzureSASgitDiff.PNG ├── AzureSASnameModule.PNG ├── AzureSASonCloudCreate.PNG ├── AzureSASonCloudInput.PNG ├── AzureSASonCloudMarketplace.PNG ├── AzureSASonCloudOutput.PNG ├── AzureSASonCloudQuery.PNG ├── AzureSASonCloudStart.PNG ├── AzureSASonCloudTest.PNG ├── AzureSASonEdgeInput.PNG ├── AzureSASonEdgeMarketplace.PNG ├── AzureSASonEdgeOutput.PNG ├── AzureSASonEdgeOutputAggregated.PNG ├── AzureSASonEdgeOutputSummarized.PNG ├── AzureSASonEdgeOutputs.PNG ├── AzureSASonEdgeQuery.PNG ├── AzureSASonEdgeQueryTest.PNG ├── AzureSASonEdgeQueryTested.PNG ├── AzureSASprep.PNG ├── AzureSASpublish.PNG ├── AzureSASselectJob.PNG ├── AzureSASselectModule.PNG ├── AzureSASselectTemplate.PNG ├── AzureSASstorage.PNG ├── AzureSASupdateImage.PNG ├── AzureSASupdateRoute.PNG ├── AzureStorageConnectionString.PNG ├── AzureStorageContainerCreate.PNG ├── AzureStorageCreate.PNG ├── AzureStorageMarkeplace.PNG ├── AzureStorageOverview.PNG ├── CameraTaggingModule.PNG ├── CreateNewCustomVisionAI.PNG ├── CreateSASonEdge.PNG ├── DeploySASonCloud.PNG ├── DeploySASonEdge.PNG ├── DownloadCustomVisionAI.PNG ├── ExportCustomVisionAI.PNG ├── IoTHubArchitecture.PNG ├── IoTHubConsumerGroup.PNG ├── IoTShow.PNG ├── LiveStream1.PNG ├── LiveStream2.PNG ├── LiveStream3.PNG ├── LiveStream4.PNG ├── LiveStream5.PNG ├── MonitorEndpoint.PNG ├── MonitorEndpointOutput.PNG ├── Playlist.PNG ├── PowerBIAskQuestion.PNG ├── PowerBIAuth.PNG ├── PowerBIDataset.PNG ├── PowerBIDatasource.PNG ├── PowerBIImport.PNG ├── PowerBIImportFile.PNG ├── PowerBINewDashboard.PNG ├── PowerBIPinLive.PNG ├── PowerBIPinViz.PNG ├── PowerBIPinned.PNG ├── PowerBIPublishReport.PNG ├── PowerBITile.PNG ├── PowerBITileDataset.PNG ├── PowerBITileViz.PNG ├── PowerBITileVizName.PNG ├── PowerBIWorkspace.PNG ├── PowerBIWorkspaceAdd.PNG ├── TSIConfig1.PNG ├── TSIConfig2.PNG ├── TSIConfig3.PNG ├── TSIConfig4.PNG ├── TSICreate.PNG ├── TSIDefaultExplorer.PNG ├── TSIDefaultModel.PNG ├── TSIDeployed.PNG ├── TSIExplorerModified.PNG ├── TSIHierarchies.PNG ├── TSIInstancePart1.PNG ├── TSIInstancePart2.PNG ├── TSIInstancePart3.PNG ├── TSIMarketplace.PNG ├── TSITypes.PNG ├── TSIVideo.PNG ├── TagSamplesCustomVisionAI.PNG ├── TrainSamplesCustomVisionAI.PNG └── UploadSamplesCustomVisionAI.PNG ├── deployment-iotcentral └── deployment.template.json ├── deployment-iothub └── deployment.template.json ├── docs ├── Module 1 - Introduction to NVIDIA DeepStream.md ├── Module 2 - Configure and Deploy Intelligent Video Analytics to IoT Edge Runtime on NVIDIA Jetson.md ├── Module 3 - Develop and deploy Custom Object Detection Models with IoT Edge DeepStream SDK Module.md ├── Module 4 - Filtering Telemetry with Azure Stream Analytics at the Edge and Modeling with Azure Time Series Insights.md └── Module 5 - Visualizing Object Detection Data in Near Real-Time with PowerBI.md ├── modules └── IoTCentralBridge │ ├── .gitignore │ ├── arm64v8.Dockerfile │ ├── module.json │ ├── package.json │ ├── src │ ├── apis │ │ ├── health.ts │ │ ├── index.ts │ │ └── module.ts │ ├── index.ts │ ├── manifest.ts │ ├── services │ │ ├── __tests__ │ │ │ └── module.ts │ │ ├── config.ts │ │ ├── device.ts │ │ ├── health.ts │ │ ├── index.ts │ │ ├── iotCentral.ts │ │ ├── logging.ts │ │ ├── module.ts │ │ ├── state.ts │ │ └── storage.ts │ └── utils │ │ ├── bind.ts │ │ ├── defer.ts │ │ ├── emptyObj.ts │ │ ├── forget.ts │ │ ├── index.ts │ │ ├── pjson.ts │ │ └── sleep.ts │ ├── tsconfig.json │ └── tslint.json └── services ├── 
AZURE_STREAMING_ANALYTICS ├── Cloud │ └── IoTHubToPowerBI.sql └── Edge │ ├── DeepStreamAnalytics.sql │ ├── DemoData.json │ └── SampleInput.json ├── CUSTOM_VISION_AI ├── LICENSE ├── labels.txt └── model.onnx ├── DEEPSTREAM ├── YoloParser │ ├── CustomVision_DeepStream5.0_JetPack4.4 │ │ ├── Makefile │ │ ├── README.MD │ │ ├── libnvdsinfer_custom_impl_Yolo.so │ │ └── nvdsparsebbox_Yolo.cpp │ ├── CustomVision_DeepStream6.1.1_JetPack5.0.2 │ │ ├── Makefile │ │ ├── README.MD │ │ ├── libnvdsinfer_custom_impl_Yolo.so │ │ ├── nvdsparsebbox_Yolo.cpp │ │ └── nvdsparsebbox_Yolo.o │ ├── CustomVision_DeepStream6.1_JetPack5.0.1 │ │ ├── Makefile │ │ ├── README.MD │ │ ├── libnvdsinfer_custom_impl_Yolo.so │ │ ├── nvdsparsebbox_Yolo.cpp │ │ └── nvdsparsebbox_Yolo.o │ ├── CustomYolo_DeepStream5.0_JetPack4.4 │ │ ├── README.MD │ │ └── libnvdsinfer_custom_impl_Yolo.so │ ├── CustomYolo_DeepStream6.1.1_JetPack5.0.2 │ │ ├── README.MD │ │ └── libnvdsinfer_custom_impl_Yolo.so │ └── CustomYolo_DeepStream6.1_JetPack5.0.1 │ │ ├── README.MD │ │ └── libnvdsinfer_custom_impl_Yolo.so └── configs │ ├── DSConfig-CustomVisionAI.txt │ ├── DSConfig-YoloV3.txt │ ├── DSConfig-YoloV3Tiny.txt │ ├── config_infer_primary_CustomVisionAI.txt │ ├── config_infer_primary_yoloV3.txt │ ├── config_infer_primary_yoloV3_tiny.txt │ └── msgconv_config.txt ├── IOT_CENTRAL └── NVIDIAJetsonDCM.json ├── POWER_BI └── DeepStream+PowerBI.pbit ├── TIME_SERIES_INSIGHTS ├── Hierarchies │ └── Locations.json └── Types │ └── ObjectDetectionType.json └── YOLOV3 ├── downloadYoloWeights.sh └── labels.txt /.gitignore: -------------------------------------------------------------------------------- 1 | config/ 2 | .env 3 | services/CUSTOM_VISION_AI/model.onnx_b1_gpu0_fp32.engine 4 | services/YOLOV3/yolov3.cfg 5 | services/YOLOV3/yolov3-tiny.cfg 6 | services/YOLOV3/yolov3.weights 7 | services/YOLOV3/yolov3-tiny.weights 8 | services/YOLOV3/yolov3_model_b1_gpu0_fp16.engine 9 | services/YOLOV3/yolov3_tiny_model_b1_gpu0_fp16.engine 10 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "azure-iot-edge.defaultPlatform": { 3 | "platform": "arm64v8", 4 | "alias": null 5 | } 6 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Paul DeCarlo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.MD: -------------------------------------------------------------------------------- 1 | ## Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure 2 | 3 | A repository demonstrating an end-to-end architecture for Intelligent Video Analytics using NVIDIA hardware with Microsoft Azure. 4 | 5 | This project contains a collection of self-paced learning modules that guide the user in developing a custom Intelligent Video Analytics application that can handle a variety of video input sources, leverage a custom object detection model, and provide backing cloud services for analysis and reporting. 6 | 7 | * [Module 1 - Introduction to NVIDIA DeepStream](./docs/Module%201%20-%20Introduction%20to%20NVIDIA%20DeepStream.md) 8 | * [Module 2 - Configure and Deploy "Intelligent Video Analytics" to IoT Edge Runtime on NVIDIA Jetson](./docs/Module%202%20-%20Configure%20and%20Deploy%20Intelligent%20Video%20Analytics%20to%20IoT%20Edge%20Runtime%20on%20NVIDIA%20Jetson.md) 9 | * [Module 3 - Develop and deploy Custom Object Detection Models with IoT Edge DeepStream SDK Module](./docs/Module%203%20-%20Develop%20and%20deploy%20Custom%20Object%20Detection%20Models%20with%20IoT%20Edge%20DeepStream%20SDK%20Module.md) 10 | * [Module 4 - Filtering Telemetry with Azure Stream Analytics at the Edge and Modeling with Azure Time Series Insights](./docs/Module%204%20-%20Filtering%20Telemetry%20with%20Azure%20Stream%20Analytics%20at%20the%20Edge%20and%20Modeling%20with%20Azure%20Time%20Series%20Insights.md) 11 | * [Module 5 - Visualizing Object Detection Data in Near Real-Time with PowerBI](./docs/Module%205%20-%20Visualizing%20Object%20Detection%20Data%20in%20Near%20Real-Time%20with%20PowerBI.md) 12 | 13 | Each of these modules is accompanied by a LiveStream that walks through the steps to reproduce it in full detail. You can watch a build-out of the entire project from the ground up by checking out the following [5-part video playlist](https://www.youtube.com/playlist?list=PLzgEG9tLG-1QLc-DPPABoW1YWFMPNQl4t) on YouTube. 14 | 15 | [![5 part video playlist](./assets/Playlist.PNG)](https://www.youtube.com/playlist?list=PLzgEG9tLG-1QLc-DPPABoW1YWFMPNQl4t) 16 | 17 | ## Overview 18 | 19 | [![5 minute teaser](./assets/5minTeaser.PNG)](https://www.youtube.com/watch?v=-DWrxUITSbc) 20 | 21 | 22 | The project makes use of the [NVIDIA DeepStream SDK](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/nvidia.deepstream-iot?tab=Overview&WT.mc_id=julyot-iva-pdecarlo) running on [NVIDIA Jetson Embedded hardware](https://www.nvidia.com/en-us/autonomous-machines/jetson-store/) to produce an Intelligent Video Analytics Pipeline. 23 | 24 | The solution employs a number of modules that run on the NVIDIA hardware device and are managed by the [Azure IoT Edge](https://azure.microsoft.com/en-us/services/iot-edge/?WT.mc_id=julyot-iva-pdecarlo) runtime.
These modules include the [Azure Blob Storage on IoT Edge Module](https://docs.microsoft.com/en-us/azure/iot-edge/how-to-deploy-blob?WT.mc_id=julyot-iva-pdecarlo) for capturing and mirroring object detection training samples to the cloud via a paired [Camera Tagging Module](https://dev.to/azure/introduction-to-the-azure-iot-edge-camera-tagging-module-di8). These captured samples are then used to train a custom object detection model with the [Custom Vision AI](https://www.customvision.ai/?WT.mc_id=julyot-iva-pdecarlo) offering from [Azure Cognitive Services](https://docs.microsoft.com/en-us/azure/cognitive-services/?WT.mc_id=julyot-iva-pdecarlo). Models generated by this service are leveraged by the DeepStream SDK module using a [Custom Yolo Parser](https://github.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/tree/master/services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1.1_JetPack5.0.2). 25 | 26 | As object detections are produced by the DeepStream SDK, they are filtered using an [Azure Stream Analytics on Edge Job](https://docs.microsoft.com/en-us/azure/stream-analytics/stream-analytics-edge?WT.mc_id=julyot-iva-pdecarlo) that transforms the output into summarized detections (a sample query illustrating this kind of summarization appears at the end of this overview). These object detection results are then transmitted to an [Azure IoT Hub](https://docs.microsoft.com/en-us/azure/iot-hub/?WT.mc_id=julyot-iva-pdecarlo) where they can be forwarded to additional cloud services for processing and reporting. 27 | 28 | The cloud services employed include [Time Series Insights](https://docs.microsoft.com/en-us/azure/time-series-insights/?WT.mc_id=julyot-iva-pdecarlo), which is a fully managed event processing service for analyzing data over time. We also demonstrate how to forward object detection data to a [PowerBI](https://docs.microsoft.com/en-us/power-bi/?WT.mc_id=julyot-iva-pdecarlo) dataset for live visualization of results within PowerBI Reports and Dashboards. 29 | 30 | ![](./assets/IoTHubArchitecture.PNG) 31 | 32 | For more details on how this all works under the hood, check out this episode of the [IoT Show](https://channel9.msdn.com/Shows/Internet-of-Things-Show) where we cover these capabilities and associated services in depth: 33 | 34 | [![IoT Show Episode](./assets/IoTShow.PNG)]( 35 | https://www.youtube.com/watch?v=EiB1j0FZjgU)
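To make the summarization step concrete, here is a minimal sketch of the kind of query the Azure Stream Analytics on Edge job might run. The input and output names below match the routes declared in the deployment templates (`DeepStreamInput`, `SummarizedDetections`), but the `object` field and the window size are illustrative assumptions; the query this project actually deploys ships in `services/AZURE_STREAMING_ANALYTICS/Edge/DeepStreamAnalytics.sql`.

```sql
-- Illustrative sketch only: see services/AZURE_STREAMING_ANALYTICS/Edge/DeepStreamAnalytics.sql
-- for the query this project deploys. "object" is a hypothetical field standing in for
-- the detection label in the DeepStream payload.
SELECT
    object,
    COUNT(*) AS count,
    System.Timestamp() AS time          -- end of the aggregation window
INTO
    [SummarizedDetections]              -- output name referenced by the $edgeHub routes
FROM
    [DeepStreamInput]                   -- input name referenced by the $edgeHub routes
GROUP BY
    object,
    TumblingWindow(second, 10)          -- assumed 10-second summarization window
```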
36 | 37 | ## Prerequisites 38 | 39 | Hardware: 40 | * [NVIDIA Jetson Embedded Device](https://www.nvidia.com/en-us/autonomous-machines/jetson-store/) running [JetPack 5.0.2](https://developer.nvidia.com/embedded/jetpack) 41 | * A [cooling fan](https://amzn.to/2ZI2ki9) installed on or pointed at the NVIDIA Jetson Nano device 42 | * RTSP Capable Camera (Optional) 43 | - Note: We recommend the [FI9821P from Foscam](https://amzn.to/2XzBRFC) 44 | * USB Webcam (Optional) 45 | - Note: If using a [Jetson Nano](https://amzn.to/2WFE5zF), the power consumption will require that your device be configured to use a [5V/4A barrel adapter](https://amzn.to/32DFsTq) as mentioned [here](https://www.jetsonhacks.com/2019/04/10/jetson-nano-use-more-power/) with an [OpenCV-compatible camera](https://web.archive.org/web/20120815172655/http://opencv.willowgarage.com/wiki/Welcome/OS/). 46 | 47 | Development Environment: 48 | - [Visual Studio Code (VSCode)](https://code.visualstudio.com/Download?WT.mc_id=github-IntelligentEdgeHOL-pdecarlo) 49 | - Note: ARM64 builds of VSCode are not officially supported; however, it is possible to install and run the development tools on your NVIDIA Jetson device. This is not recommended on Jetson Nano hardware due to resource limitations. Consult this article on [Getting Started with IoT Edge Development on Nvidia Jetson Devices](https://dev.to/azure/getting-started-with-iot-edge-development-on-nvidia-jetson-devices-2dfl) for more details. 50 | - Visual Studio Code Extensions 51 | - [Azure IoT Tools Extension](https://marketplace.visualstudio.com/items?itemName=vsciot-vscode.azure-iot-tools) 52 | - Git tool(s) 53 | - [Git command line](https://git-scm.com/) 54 | 55 | Cloud Services: 56 | - An active [Microsoft Azure Subscription](https://azure.microsoft.com/en-us/get-started?WT.mc_id=julyot-iva-pdecarlo) 57 | 58 | ## Learn more, get certified 59 | 60 | If you are interested in learning more about building solutions with Azure IoT Services, check out the following free learning resources: 61 | 62 | * [IoT learning paths on Microsoft Learn](https://docs.microsoft.com/learn/browse/?term=IOT&WT.mc_id=julyot-iva-pdecarlo) 63 | * [The IoT show on Channel9](https://channel9.msdn.com/Shows/Internet-of-Things-Show/?WT.mc_id=julyot-iva-pdecarlo) 64 | 65 | Once you have upskilled as an IoT developer, make it official with the [AZ-220 Azure IoT Developer certification](https://docs.microsoft.com/learn/certifications/azure-iot-developer-specialty?WT.mc_id=julyot-iva-pdecarlo). 66 | -------------------------------------------------------------------------------- /assets/5minTeaser.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/5minTeaser.PNG -------------------------------------------------------------------------------- /assets/AzureSASgitDiff.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASgitDiff.PNG -------------------------------------------------------------------------------- /assets/AzureSASnameModule.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASnameModule.PNG -------------------------------------------------------------------------------- /assets/AzureSASonCloudCreate.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonCloudCreate.PNG -------------------------------------------------------------------------------- /assets/AzureSASonCloudInput.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonCloudInput.PNG -------------------------------------------------------------------------------- /assets/AzureSASonCloudMarketplace.PNG: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonCloudMarketplace.PNG -------------------------------------------------------------------------------- /assets/AzureSASonCloudOutput.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonCloudOutput.PNG -------------------------------------------------------------------------------- /assets/AzureSASonCloudQuery.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonCloudQuery.PNG -------------------------------------------------------------------------------- /assets/AzureSASonCloudStart.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonCloudStart.PNG -------------------------------------------------------------------------------- /assets/AzureSASonCloudTest.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonCloudTest.PNG -------------------------------------------------------------------------------- /assets/AzureSASonEdgeInput.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonEdgeInput.PNG -------------------------------------------------------------------------------- /assets/AzureSASonEdgeMarketplace.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonEdgeMarketplace.PNG -------------------------------------------------------------------------------- /assets/AzureSASonEdgeOutput.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonEdgeOutput.PNG -------------------------------------------------------------------------------- /assets/AzureSASonEdgeOutputAggregated.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonEdgeOutputAggregated.PNG -------------------------------------------------------------------------------- /assets/AzureSASonEdgeOutputSummarized.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonEdgeOutputSummarized.PNG -------------------------------------------------------------------------------- /assets/AzureSASonEdgeOutputs.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonEdgeOutputs.PNG -------------------------------------------------------------------------------- /assets/AzureSASonEdgeQuery.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonEdgeQuery.PNG -------------------------------------------------------------------------------- /assets/AzureSASonEdgeQueryTest.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonEdgeQueryTest.PNG -------------------------------------------------------------------------------- /assets/AzureSASonEdgeQueryTested.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASonEdgeQueryTested.PNG -------------------------------------------------------------------------------- /assets/AzureSASprep.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASprep.PNG -------------------------------------------------------------------------------- /assets/AzureSASpublish.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASpublish.PNG -------------------------------------------------------------------------------- /assets/AzureSASselectJob.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASselectJob.PNG -------------------------------------------------------------------------------- /assets/AzureSASselectModule.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASselectModule.PNG -------------------------------------------------------------------------------- /assets/AzureSASselectTemplate.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASselectTemplate.PNG -------------------------------------------------------------------------------- /assets/AzureSASstorage.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASstorage.PNG -------------------------------------------------------------------------------- /assets/AzureSASupdateImage.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASupdateImage.PNG -------------------------------------------------------------------------------- /assets/AzureSASupdateRoute.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureSASupdateRoute.PNG -------------------------------------------------------------------------------- /assets/AzureStorageConnectionString.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureStorageConnectionString.PNG -------------------------------------------------------------------------------- /assets/AzureStorageContainerCreate.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureStorageContainerCreate.PNG -------------------------------------------------------------------------------- /assets/AzureStorageCreate.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureStorageCreate.PNG -------------------------------------------------------------------------------- /assets/AzureStorageMarkeplace.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureStorageMarkeplace.PNG -------------------------------------------------------------------------------- /assets/AzureStorageOverview.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/AzureStorageOverview.PNG -------------------------------------------------------------------------------- /assets/CameraTaggingModule.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/CameraTaggingModule.PNG -------------------------------------------------------------------------------- /assets/CreateNewCustomVisionAI.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/CreateNewCustomVisionAI.PNG -------------------------------------------------------------------------------- /assets/CreateSASonEdge.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/CreateSASonEdge.PNG -------------------------------------------------------------------------------- /assets/DeploySASonCloud.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/DeploySASonCloud.PNG -------------------------------------------------------------------------------- /assets/DeploySASonEdge.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/DeploySASonEdge.PNG -------------------------------------------------------------------------------- /assets/DownloadCustomVisionAI.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/DownloadCustomVisionAI.PNG -------------------------------------------------------------------------------- /assets/ExportCustomVisionAI.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/ExportCustomVisionAI.PNG -------------------------------------------------------------------------------- /assets/IoTHubArchitecture.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/IoTHubArchitecture.PNG -------------------------------------------------------------------------------- /assets/IoTHubConsumerGroup.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/IoTHubConsumerGroup.PNG -------------------------------------------------------------------------------- /assets/IoTShow.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/IoTShow.PNG 
-------------------------------------------------------------------------------- /assets/LiveStream1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/LiveStream1.PNG -------------------------------------------------------------------------------- /assets/LiveStream2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/LiveStream2.PNG -------------------------------------------------------------------------------- /assets/LiveStream3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/LiveStream3.PNG -------------------------------------------------------------------------------- /assets/LiveStream4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/LiveStream4.PNG -------------------------------------------------------------------------------- /assets/LiveStream5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/LiveStream5.PNG -------------------------------------------------------------------------------- /assets/MonitorEndpoint.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/MonitorEndpoint.PNG -------------------------------------------------------------------------------- /assets/MonitorEndpointOutput.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/MonitorEndpointOutput.PNG -------------------------------------------------------------------------------- /assets/Playlist.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/Playlist.PNG -------------------------------------------------------------------------------- /assets/PowerBIAskQuestion.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIAskQuestion.PNG -------------------------------------------------------------------------------- /assets/PowerBIAuth.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIAuth.PNG -------------------------------------------------------------------------------- /assets/PowerBIDataset.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIDataset.PNG -------------------------------------------------------------------------------- /assets/PowerBIDatasource.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIDatasource.PNG -------------------------------------------------------------------------------- /assets/PowerBIImport.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIImport.PNG -------------------------------------------------------------------------------- /assets/PowerBIImportFile.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIImportFile.PNG -------------------------------------------------------------------------------- /assets/PowerBINewDashboard.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBINewDashboard.PNG -------------------------------------------------------------------------------- /assets/PowerBIPinLive.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIPinLive.PNG -------------------------------------------------------------------------------- /assets/PowerBIPinViz.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIPinViz.PNG -------------------------------------------------------------------------------- /assets/PowerBIPinned.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIPinned.PNG -------------------------------------------------------------------------------- /assets/PowerBIPublishReport.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIPublishReport.PNG 
-------------------------------------------------------------------------------- /assets/PowerBITile.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBITile.PNG -------------------------------------------------------------------------------- /assets/PowerBITileDataset.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBITileDataset.PNG -------------------------------------------------------------------------------- /assets/PowerBITileViz.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBITileViz.PNG -------------------------------------------------------------------------------- /assets/PowerBITileVizName.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBITileVizName.PNG -------------------------------------------------------------------------------- /assets/PowerBIWorkspace.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIWorkspace.PNG -------------------------------------------------------------------------------- /assets/PowerBIWorkspaceAdd.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/PowerBIWorkspaceAdd.PNG -------------------------------------------------------------------------------- /assets/TSIConfig1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIConfig1.PNG -------------------------------------------------------------------------------- /assets/TSIConfig2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIConfig2.PNG -------------------------------------------------------------------------------- /assets/TSIConfig3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIConfig3.PNG -------------------------------------------------------------------------------- /assets/TSIConfig4.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIConfig4.PNG -------------------------------------------------------------------------------- /assets/TSICreate.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSICreate.PNG -------------------------------------------------------------------------------- /assets/TSIDefaultExplorer.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIDefaultExplorer.PNG -------------------------------------------------------------------------------- /assets/TSIDefaultModel.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIDefaultModel.PNG -------------------------------------------------------------------------------- /assets/TSIDeployed.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIDeployed.PNG -------------------------------------------------------------------------------- /assets/TSIExplorerModified.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIExplorerModified.PNG -------------------------------------------------------------------------------- /assets/TSIHierarchies.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIHierarchies.PNG -------------------------------------------------------------------------------- /assets/TSIInstancePart1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIInstancePart1.PNG -------------------------------------------------------------------------------- /assets/TSIInstancePart2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIInstancePart2.PNG -------------------------------------------------------------------------------- /assets/TSIInstancePart3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIInstancePart3.PNG 
-------------------------------------------------------------------------------- /assets/TSIMarketplace.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIMarketplace.PNG -------------------------------------------------------------------------------- /assets/TSITypes.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSITypes.PNG -------------------------------------------------------------------------------- /assets/TSIVideo.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TSIVideo.PNG -------------------------------------------------------------------------------- /assets/TagSamplesCustomVisionAI.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TagSamplesCustomVisionAI.PNG -------------------------------------------------------------------------------- /assets/TrainSamplesCustomVisionAI.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/TrainSamplesCustomVisionAI.PNG -------------------------------------------------------------------------------- /assets/UploadSamplesCustomVisionAI.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/assets/UploadSamplesCustomVisionAI.PNG -------------------------------------------------------------------------------- /deployment-iotcentral/deployment.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-template": "1.0.0", 3 | "modulesContent": { 4 | "$edgeAgent": { 5 | "properties.desired": { 6 | "schemaVersion": "1.0", 7 | "runtime": { 8 | "type": "docker", 9 | "settings": { 10 | "minDockerVersion": "v1.25", 11 | "loggingOptions": "", 12 | "registryCredentials": { 13 | "containerRegistry": { 14 | "username": "$CONTAINER_REGISTRY_USERNAME", 15 | "password": "$CONTAINER_REGISTRY_PASSWORD", 16 | "address": "$CONTAINER_REGISTRY_NAME" 17 | } 18 | } 19 | } 20 | }, 21 | "systemModules": { 22 | "edgeAgent": { 23 | "type": "docker", 24 | "settings": { 25 | "image": "mcr.microsoft.com/azureiotedge-agent:1.0", 26 | "createOptions": {} 27 | } 28 | }, 29 | "edgeHub": { 30 | "type": "docker", 31 | "status": "running", 32 | "restartPolicy": "always", 33 | "settings": { 34 | "image": "mcr.microsoft.com/azureiotedge-hub:1.0", 35 | "createOptions": { 36 | "HostConfig": { 37 | "PortBindings": { 38 | "5671/tcp": [ 39 | { 40 | "HostPort": "5671" 41 | } 42 | ], 43 | "8883/tcp": [ 44 | { 45 | "HostPort": "8883" 46 | } 47 | ], 48 | "443/tcp": [ 49 | { 50 | "HostPort": "443" 51 | } 52 | 
] 53 | } 54 | } 55 | } 56 | } 57 | } 58 | }, 59 | "modules": { 60 | "deepstream": { 61 | "version": "1.0", 62 | "type": "docker", 63 | "status": "running", 64 | "restartPolicy": "always", 65 | "settings": { 66 | "image": "nvcr.io/nvidia/deepstream-l4t:5.0-20.07-iot", 67 | "createOptions": { 68 | "Entrypoint": [ 69 | "/usr/bin/deepstream-test5-app", 70 | "-c", 71 | "DSConfig-CustomVisionAI.txt" 72 | ], 73 | "HostConfig": { 74 | "runtime": "nvidia", 75 | "NetworkMode": "host", 76 | "Binds": [ 77 | "/data/misc/storage:/data/misc/storage", 78 | "/tmp/argus_socket:/tmp/argus_socket", 79 | "/tmp/.X11-unix/:/tmp/.X11-unix/" 80 | ], 81 | "IpcMode": "host" 82 | }, 83 | "NetworkingConfig": { 84 | "EndpointsConfig": { 85 | "host": {} 86 | } 87 | }, 88 | "WorkingDir": "/data/misc/storage/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/services/DEEPSTREAM/configs" 89 | } 90 | }, 91 | "env": { 92 | "DISPLAY": { 93 | "value": ":1" 94 | } 95 | } 96 | }, 97 | "CameraTaggingModule": { 98 | "version": "1.0.3", 99 | "type": "docker", 100 | "status": "running", 101 | "restartPolicy": "always", 102 | "settings": { 103 | "image": "toolboc/camerataggingmodule:latest", 104 | "createOptions": { 105 | "ExposedPorts": { 106 | "3000/tcp": {}, 107 | "3002/tcp": {}, 108 | "3003/tcp": {} 109 | }, 110 | "HostConfig": { 111 | "PortBindings": { 112 | "3000/tcp": [ 113 | { 114 | "HostPort": "3000" 115 | } 116 | ], 117 | "3002/tcp": [ 118 | { 119 | "HostPort": "3002" 120 | } 121 | ], 122 | "3003/tcp": [ 123 | { 124 | "HostPort": "3003" 125 | } 126 | ] 127 | } 128 | } 129 | } 130 | }, 131 | "env": { 132 | "RTSP_IP": { 133 | "value": "wowzaec2demo.streamlock.net" 134 | }, 135 | "RTSP_PORT": { 136 | "value": "554" 137 | }, 138 | "RTSP_PATH": { 139 | "value": "vod/mp4:BigBuckBunny_115k.mov" 140 | }, 141 | "REACT_APP_SERVER_PORT": { 142 | "value": "3003" 143 | }, 144 | "REACT_APP_WEB_SOCKET_PORT": { 145 | "value": "3002" 146 | }, 147 | "REACT_APP_LOCAL_STORAGE_MODULE_NAME": { 148 | "value": "azureblobstorageoniotedge" 149 | }, 150 | "REACT_APP_LOCAL_STORAGE_PORT": { 151 | "value": "11002" 152 | }, 153 | "REACT_APP_LOCAL_STORAGE_ACCOUNT_NAME": { 154 | "value": "$LOCAL_STORAGE_ACCOUNT_NAME" 155 | }, 156 | "REACT_APP_LOCAL_STORAGE_ACCOUNT_KEY": { 157 | "value": "$LOCAL_STORAGE_ACCOUNT_KEY" 158 | } 159 | } 160 | }, 161 | "azureblobstorageoniotedge": { 162 | "version": "1.2", 163 | "type": "docker", 164 | "status": "running", 165 | "restartPolicy": "always", 166 | "settings": { 167 | "image": "mcr.microsoft.com/azure-blob-storage:latest", 168 | "createOptions": { 169 | "Env": [ 170 | "LOCAL_STORAGE_ACCOUNT_NAME=$LOCAL_STORAGE_ACCOUNT_NAME", 171 | "LOCAL_STORAGE_ACCOUNT_KEY=$LOCAL_STORAGE_ACCOUNT_KEY" 172 | ], 173 | "HostConfig": { 174 | "Binds": [ 175 | "/data/containerdata:/blobroot" 176 | ], 177 | "PortBindings": { 178 | "11002/tcp": [ 179 | { 180 | "HostPort": "11002" 181 | } 182 | ] 183 | } 184 | } 185 | } 186 | } 187 | }, 188 | "DeepStreamAnalytics": { 189 | "version": "1.0", 190 | "type": "docker", 191 | "status": "running", 192 | "restartPolicy": "always", 193 | "settings": { 194 | "image": "mcr.microsoft.com/azure-stream-analytics/azureiotedge:1.0.6-linux-arm32v7", 195 | "createOptions": {} 196 | }, 197 | "env": { 198 | "PlanId": { 199 | "value": "stream-analytics-on-iot-edge" 200 | } 201 | } 202 | }, 203 | "IoTCentralBridge": { 204 | "settings": { 205 | "image": "toolboc/iotcentralbridge:0.1.0-arm64v8", 206 | "createOptions": { 207 | "HostConfig": { 208 | "PortBindings": { 209 | "9010/tcp": [ 210 | { 211 | 
"HostPort": "9014" 212 | } 213 | ] 214 | }, 215 | "Binds": [ 216 | "/data/misc:/data/misc", 217 | "/run/systemd:/run/systemd", 218 | "/var/run/docker.sock:/var/run/docker.sock" 219 | ], 220 | "NetworkMode": "host" 221 | }, 222 | "NetworkingConfig": {"EndpointsConfig": {"host":{}}} 223 | } 224 | }, 225 | "type": "docker", 226 | "env": { 227 | "DEBUG_TELEMETRY": { 228 | "value": "1" 229 | }, 230 | "DEBUG_ROUTING_DATA": { 231 | "value": "0" 232 | }, 233 | "inferenceThrottle": { 234 | "value": 2000 235 | }, 236 | "FORCE_HEALTHCHECK": { 237 | "value": "1" 238 | } 239 | }, 240 | "status": "running", 241 | "restartPolicy": "always", 242 | "version": "1.0" 243 | 244 | } 245 | } 246 | } 247 | }, 248 | "$edgeHub": { 249 | "properties.desired": { 250 | "schemaVersion": "1.0", 251 | "routes": { 252 | "deepstreamToDeepStreamAnalytics": "FROM /messages/modules/deepstream/outputs/* INTO BrokeredEndpoint(\"/modules/DeepStreamAnalytics/inputs/DeepStreamInput\")", 253 | "DeepStreamAnalyticsToIoTHub": "FROM /messages/modules/DeepStreamAnalytics/outputs/SummarizedDetections INTO $upstream", 254 | "azureblobstorageoniotedgeToIoTHub": "FROM /messages/modules/azureblobstorageoniotedge/outputs/* INTO $upstream", 255 | "NVDSToIoTCBridge": "FROM /messages/modules/deepstream/outputs/* INTO BrokeredEndpoint(\"/modules/IoTCentralBridge/inputs/dsmessages\")", 256 | "filterToIoTHub": "FROM /messages/modules/IoTCentralBridge/* INTO $upstream" 257 | }, 258 | "storeAndForwardConfiguration": { 259 | "timeToLiveSecs": 7200 260 | } 261 | } 262 | }, 263 | "azureblobstorageoniotedge": { 264 | "properties.desired": { 265 | "deviceAutoDeleteProperties": { 266 | "deleteOn": false, 267 | "retainWhileUploading": true 268 | }, 269 | "deviceToCloudUploadProperties": { 270 | "uploadOn": true, 271 | "uploadOrder": "OldestFirst", 272 | "cloudStorageConnectionString": "$CLOUD_STORAGE_CONNECTION_STRING", 273 | "storageContainersForUpload": { 274 | "$LOCAL_STORAGE_ACCOUNT_NAME": { 275 | "target": "$DESTINATION_STORAGE_NAME" 276 | } 277 | }, 278 | "deleteAfterUpload": true 279 | } 280 | } 281 | }, 282 | "DeepStreamAnalytics": { 283 | "properties.desired": { 284 | "ASAJobInfo": "https://acceleratededgesasstore.blob.core.windows.net/deepstreamanalytics/ASAEdgeJobs/14ab14a0-b605-429e-ba2e-5bf4ae5b0ba3/e476b018-353d-4fe6-ab10-d05815093e94/ASAEdgeJobDefinition.zip?sv=2018-03-28&sr=b&sig=V5raYdw%2FLPgdUj7MOShYnd91w38G7BHC4FJ7nG8NoGw%3D&st=2020-06-19T22%3A18%3A33Z&se=2023-06-19T22%3A28%3A33Z&sp=r" 285 | } 286 | } 287 | } 288 | } -------------------------------------------------------------------------------- /deployment-iothub/deployment.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-template": "1.0.0", 3 | "modulesContent": { 4 | "$edgeAgent": { 5 | "properties.desired": { 6 | "schemaVersion": "1.0", 7 | "runtime": { 8 | "type": "docker", 9 | "settings": { 10 | "minDockerVersion": "v1.25", 11 | "loggingOptions": "", 12 | "registryCredentials": { 13 | "containerRegistry": { 14 | "username": "$CONTAINER_REGISTRY_USERNAME", 15 | "password": "$CONTAINER_REGISTRY_PASSWORD", 16 | "address": "$CONTAINER_REGISTRY_NAME" 17 | } 18 | } 19 | } 20 | }, 21 | "systemModules": { 22 | "edgeAgent": { 23 | "type": "docker", 24 | "settings": { 25 | "image": "mcr.microsoft.com/azureiotedge-agent:1.0", 26 | "createOptions": {} 27 | } 28 | }, 29 | "edgeHub": { 30 | "type": "docker", 31 | "status": "running", 32 | "restartPolicy": "always", 33 | "settings": { 34 | "image": 
"mcr.microsoft.com/azureiotedge-hub:1.0", 35 | "createOptions": { 36 | "HostConfig": { 37 | "PortBindings": { 38 | "5671/tcp": [ 39 | { 40 | "HostPort": "5671" 41 | } 42 | ], 43 | "8883/tcp": [ 44 | { 45 | "HostPort": "8883" 46 | } 47 | ], 48 | "443/tcp": [ 49 | { 50 | "HostPort": "443" 51 | } 52 | ] 53 | } 54 | } 55 | } 56 | } 57 | } 58 | }, 59 | "modules": { 60 | "NVIDIADeepStreamSDK": { 61 | "version": "1.0", 62 | "type": "docker", 63 | "status": "running", 64 | "restartPolicy": "always", 65 | "settings": { 66 | "image": "nvcr.io/nvidia/deepstream-l4t:6.1.1-iot", 67 | "createOptions": { 68 | "Entrypoint": [ 69 | "/usr/bin/deepstream-test5-app", 70 | "-c", 71 | "DSConfig-CustomVisionAI.txt" 72 | ], 73 | "HostConfig": { 74 | "runtime": "nvidia", 75 | "NetworkMode": "host", 76 | "Binds": [ 77 | "/data/misc/storage:/data/misc/storage", 78 | "/tmp/argus_socket:/tmp/argus_socket", 79 | "/tmp/.X11-unix/:/tmp/.X11-unix/" 80 | ], 81 | "IpcMode": "host" 82 | }, 83 | "NetworkingConfig": { 84 | "EndpointsConfig": { 85 | "host": {} 86 | } 87 | }, 88 | "WorkingDir": "/data/misc/storage/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/services/DEEPSTREAM/configs" 89 | } 90 | }, 91 | "env": { 92 | "DISPLAY": { 93 | "value": ":1" 94 | } 95 | } 96 | }, 97 | "CameraTaggingModule": { 98 | "version": "1.0.3", 99 | "type": "docker", 100 | "status": "running", 101 | "restartPolicy": "always", 102 | "settings": { 103 | "image": "toolboc/camerataggingmodule:latest", 104 | "createOptions": { 105 | "ExposedPorts": { 106 | "3000/tcp": {}, 107 | "3002/tcp": {}, 108 | "3003/tcp": {} 109 | }, 110 | "HostConfig": { 111 | "PortBindings": { 112 | "3000/tcp": [ 113 | { 114 | "HostPort": "3000" 115 | } 116 | ], 117 | "3002/tcp": [ 118 | { 119 | "HostPort": "3002" 120 | } 121 | ], 122 | "3003/tcp": [ 123 | { 124 | "HostPort": "3003" 125 | } 126 | ] 127 | } 128 | } 129 | } 130 | }, 131 | "env": { 132 | "RTSP_IP": { 133 | "value": "wowzaec2demo.streamlock.net" 134 | }, 135 | "RTSP_PORT": { 136 | "value": "554" 137 | }, 138 | "RTSP_PATH": { 139 | "value": "vod/mp4:BigBuckBunny_115k.mov" 140 | }, 141 | "REACT_APP_SERVER_PORT": { 142 | "value": "3003" 143 | }, 144 | "REACT_APP_WEB_SOCKET_PORT": { 145 | "value": "3002" 146 | }, 147 | "REACT_APP_LOCAL_STORAGE_MODULE_NAME": { 148 | "value": "azureblobstorageoniotedge" 149 | }, 150 | "REACT_APP_LOCAL_STORAGE_PORT": { 151 | "value": "11002" 152 | }, 153 | "REACT_APP_LOCAL_STORAGE_ACCOUNT_NAME": { 154 | "value": "$LOCAL_STORAGE_ACCOUNT_NAME" 155 | }, 156 | "REACT_APP_LOCAL_STORAGE_ACCOUNT_KEY": { 157 | "value": "$LOCAL_STORAGE_ACCOUNT_KEY" 158 | } 159 | } 160 | }, 161 | "azureblobstorageoniotedge": { 162 | "version": "1.2", 163 | "type": "docker", 164 | "status": "running", 165 | "restartPolicy": "always", 166 | "settings": { 167 | "image": "mcr.microsoft.com/azure-blob-storage:latest", 168 | "createOptions": { 169 | "Env": [ 170 | "LOCAL_STORAGE_ACCOUNT_NAME=$LOCAL_STORAGE_ACCOUNT_NAME", 171 | "LOCAL_STORAGE_ACCOUNT_KEY=$LOCAL_STORAGE_ACCOUNT_KEY" 172 | ], 173 | "HostConfig": { 174 | "Binds": [ 175 | "/data/containerdata:/blobroot" 176 | ], 177 | "PortBindings": { 178 | "11002/tcp": [ 179 | { 180 | "HostPort": "11002" 181 | } 182 | ] 183 | } 184 | } 185 | } 186 | } 187 | }, 188 | "DeepStreamAnalytics": { 189 | "version": "1.0", 190 | "type": "docker", 191 | "status": "running", 192 | "restartPolicy": "always", 193 | "settings": { 194 | "image": "mcr.microsoft.com/azure-stream-analytics/azureiotedge:1.0.9-linux-arm32v7", 195 | "createOptions": {} 196 | }, 197 | 
"env": { 198 | "PlanId": { 199 | "value": "stream-analytics-on-iot-edge" 200 | } 201 | } 202 | } 203 | } 204 | } 205 | }, 206 | "$edgeHub": { 207 | "properties.desired": { 208 | "schemaVersion": "1.0", 209 | "routes": { 210 | "NVIDIADeepStreamSDKToDeepStreamAnalytics": "FROM /messages/modules/NVIDIADeepStreamSDK/outputs/* INTO BrokeredEndpoint(\"/modules/DeepStreamAnalytics/inputs/DeepStreamInput\")", 211 | "DeepStreamAnalyticsToIoTHub": "FROM /messages/modules/DeepStreamAnalytics/outputs/SummarizedDetections INTO $upstream", 212 | "azureblobstorageoniotedgeToIoTHub": "FROM /messages/modules/azureblobstorageoniotedge/outputs/* INTO $upstream" 213 | }, 214 | "storeAndForwardConfiguration": { 215 | "timeToLiveSecs": 7200 216 | } 217 | } 218 | }, 219 | "azureblobstorageoniotedge": { 220 | "properties.desired": { 221 | "deviceAutoDeleteProperties": { 222 | "deleteOn": false, 223 | "retainWhileUploading": true 224 | }, 225 | "deviceToCloudUploadProperties": { 226 | "uploadOn": true, 227 | "uploadOrder": "OldestFirst", 228 | "cloudStorageConnectionString": "$CLOUD_STORAGE_CONNECTION_STRING", 229 | "storageContainersForUpload": { 230 | "$LOCAL_STORAGE_ACCOUNT_NAME": { 231 | "target": "$DESTINATION_STORAGE_NAME" 232 | } 233 | }, 234 | "deleteAfterUpload": true 235 | } 236 | } 237 | }, 238 | "DeepStreamAnalytics": { 239 | "properties.desired": { 240 | "ASAJobInfo": "https://acceleratededgesasstore.blob.core.windows.net/deepstreamanalytics/ASAEdgeJobs/585778d0-7865-474f-b97b-19b5d3959d2d/4a34bf79-efe2-4255-b858-473058e4c533/ASAEdgeJobDefinition.zip?sv=2018-03-28&sr=b&sig=k%2B6z4JklD1KUpggFQJlFLaFFCOGP4DxUH%2Ff5H77JCBc%3D&st=2020-11-02T15%3A00%3A47Z&se=2023-11-02T15%3A10%3A47Z&sp=r" 241 | } 242 | } 243 | } 244 | } -------------------------------------------------------------------------------- /docs/Module 1 - Introduction to NVIDIA DeepStream.md: -------------------------------------------------------------------------------- 1 | ## Module 1 - Introduction to NVIDIA DeepStream 2 | 3 | The [NVIDIA DeepStream SDK](https://developer.nvidia.com/deepstream-sdk) delivers a complete streaming analytics toolkit for AI based video and image understanding and multi-sensor processing. DeepStream SDK features hardware-accelerated building blocks, called plugins that bring deep neural networks and other complex processing tasks into a stream processing pipeline. 4 | 5 | The deepstream offering contains the DeepStream SDK which include an app (deepstream-test5) that is configurable to handle multiple streams and multiple networks for inference. The app can be connected to the [Azure IoT Edge runtime](https://docs.microsoft.com/en-us/azure/iot-edge/about-iot-edg?WT.mc_id=julyot-iva-pdecarlo) to send messages to a configured [Azure IoT Hub](https://docs.microsoft.com/en-us/azure/iot-hub/?WT.mc_id=julyot-iva-pdecarlo). 6 | 7 | The [DeepStream SDK is offered in the Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/nvidia.deepstream-iot?WT.mc_id=julyot-iva-pdecarlo) as an [IoT Edge Module](https://docs.microsoft.com/en-us/azure/iot-edge/quickstart-linux?WT.mc_id=julyot-iva-pdecarlo). We will employ this mechanism to configure and run a DeepStream workload on an NVIDIA embedded device. 8 | 9 | Before continuing, it is highly suggested to familiarize with the [DeepStream SDK Documentation](http://aka.ms/deepstreamdevguide), as it will provide you with the details on how to customize the Intelligent Video Analytics solution to your needs. 
10 | 11 | We cover pretty much everything you need to know in this 90-minute livestream titled "[Getting Started with NVIDIA Jetson: Object Detection](https://www.youtube.com/watch?v=yZz-4uOx_Js)". We highly recommend that you give it a watch before proceeding to the next section. 12 | 13 | [![Getting Started with NVIDIA Jetson: Object Detection](../assets/LiveStream1.PNG)](https://www.youtube.com/watch?v=yZz-4uOx_Js) -------------------------------------------------------------------------------- /docs/Module 5 - Visualizing Object Detection Data in Near Real-Time with PowerBI.md: -------------------------------------------------------------------------------- 1 | ## Module 5 : Visualizing Object Detection Data in Near Real-Time with PowerBI 2 | 3 | Power BI is a business analytics service provided by Microsoft. It provides interactive visualizations with self-service business intelligence capabilities, where end users can create reports and dashboards by themselves, without having to depend on information technology staff or database administrators. 4 | 5 | In this module, we will cover how to forward object detection telemetry from our Azure IoT Hub into a PowerBI dataset using a cloud-based Azure Stream Analytics job. This will allow us to build a report that can be refreshed to update as detections are produced. We will then publish a PowerBI report and convert it to a live dashboard. From there, we can query our data with natural language and interact with our data in near real-time. 6 | 7 | To complete this module, you will need an active PowerBI account. If you need to create one, this [video](https://channel9.msdn.com/Blogs/BretStateham/Signing-up-for-Power-BI) walks through the process. 8 | 9 | If you wish to follow along with the steps in this module, we have recorded a livestream presentation titled "[Visualizing Object Detection Data in Near Real-Time with PowerBI](https://www.youtube.com/watch?v=lhvPbNF9eb4)" that walks through the steps below in great detail. 10 | 11 | [![Visualizing Object Detection Data in Near Real-Time with PowerBI](../assets/LiveStream5.PNG)](https://www.youtube.com/watch?v=lhvPbNF9eb4) 12 | 13 | ## Module 5.1 : Forwarding telemetry from IoT Hub to PowerBI using a Cloud-Based Azure Stream Analytics Job 14 | 15 | Before attempting these steps, ensure that your NVIDIA Jetson device is sending live data to the associated Azure IoT Hub. You can do this within Visual Studio Code by expanding the Azure IoT Hub extension (which should be configured to the associated IoT Hub), selecting "Devices", then right-clicking the current device and choosing "Start Monitoring Built-in Event Endpoint": 16 | 17 | ![Monitor Built-in Event Endpoint VSCode](../assets/MonitorEndpoint.PNG) 18 | 19 | After about 15 seconds, you should begin seeing data in the `OUTPUT` window: 20 | 21 | ![Monitor Built-in Event Endpoint VSCode Output](../assets/MonitorEndpointOutput.PNG) 22 | 23 | Once you have confirmed that data is flowing, we can now create the backend services that will be used to ingest this live data. 24 | 25 | [Azure Stream Analytics enables you to take advantage of one of the leading business intelligence tools](https://docs.microsoft.com/en-us/azure/stream-analytics/stream-analytics-power-bi-dashboard?WT.mc_id=julyot-iva-pdecarlo), Microsoft [Power BI](https://docs.microsoft.com/en-us/power-bi/fundamentals/power-bi-overview?WT.mc_id=julyot-iva-pdecarlo).
In this section, you will learn how to configure Power BI as an output from an Azure Stream Analytics job that forwards data arriving into our IoT Hub. 26 | 27 | To begin, we will create a PowerBI workspace to publish our dataset and reports to. Navigate to powerbi.microsoft.com and log in. Next, select the "Workspace" icon then "Create a workspace": 28 | 29 | ![Power BI Workspace](../assets/PowerBIWorkspace.PNG) 30 | 31 | In the resulting prompt, name your workspace "Intelligent Video Analytics" and select "Save": 32 | 33 | ![Power BI Workspace Add](../assets/PowerBIWorkspaceAdd.PNG) 34 | 35 | Next, we will create a new consumer group to allow access to messages produced by our IoT Hub. IoT Hubs limit the number of readers within one consumer group (to 5), and this will ensure that any future modifications to the solution do not impact the ability to process messages. To accomplish this, navigate to your IoT Hub instance and select "Built-in Endpoints", then create a new "Consumer Group" named "sas" as shown: 36 | 37 | ![Azure IoT Hub Consumer Group](../assets/IoTHubConsumerGroup.PNG) 38 | 39 | Navigate to the Azure Marketplace and search for 'Stream Analytics job' 40 | 41 | ![Azure SAS on Cloud Marketplace](../assets/AzureSASonCloudMarketplace.PNG) 42 | 43 | Select "Create": 44 | 45 | ![Azure SAS on Cloud Create](../assets/AzureSASonCloudCreate.PNG) 46 | 47 | Name the Stream Analytics Job, ensure that it is deployed into the same region as the original IoT Hub, ensure the Hosting environment is set to "Cloud", set "Streaming units" to "1", then select "Create": 48 | 49 | ![Deploy Azure SAS on Cloud](../assets/DeploySASonCloud.PNG) 50 | 51 | Navigate to the newly created job and select "Inputs"; here we will configure the input alias used to forward data from our IoT Hub. Naming is extremely important in this step and must match the alias used in the query. 52 | 53 | Select "Add stream input", then in the resulting drop-down select "IoT Hub", set the "Input Alias" to "IoTHub-Input" (naming is very important in this step!), and ensure that the consumer group is set to "sas" as shown: 54 | 55 | ![Azure SAS on Cloud Input](../assets/AzureSASonCloudInput.PNG) 56 | 57 | Next, navigate to "Outputs", where we will configure the output alias used to push data into a PowerBI sink. Select "Add", then select "Power BI" and authorize the PowerBI service in the resulting prompt: 58 | 59 | ![Azure SAS on Cloud PowerBI Auth](../assets/PowerBIAuth.PNG) 60 | 61 | In the resulting prompt, set the "Output Alias" to "StreamAnalytics-Cloud-Output" (naming is very important in this step!). Set "Group Workspace" to the value used earlier when we created a new workspace at powerbi.microsoft.com ("Intelligent Video Analytics"), set "Dataset name" to "Intelligent Video Analytics Dataset", and set "Table name" to "Intelligent Video Analytics Table". Finally, set Authentication mode to "User Token": 62 | 63 | ![Azure SAS on Cloud Output](../assets/AzureSASonCloudOutput.PNG) 64 | 65 | Navigate back to the newly created job and select "Query", then edit the Query to contain the contents of [IoTHubToPowerBI.sql](../services/AZURE_STREAMING_ANALYTICS/Cloud/IoTHubToPowerBI.sql) and save the Query: 66 | 67 | ![Azure SAS on Cloud Query](../assets/AzureSASonCloudQuery.PNG) 68 | 69 | As long as data is flowing into your IoT Hub, you can select "Test query" and it should produce results.
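For reference, the full contents of [IoTHubToPowerBI.sql](../services/AZURE_STREAMING_ANALYTICS/Cloud/IoTHubToPowerBI.sql) is a simple pass-through that reads from the "IoTHub-Input" alias, timestamps each event by its `@timestamp` field, and writes to the "StreamAnalytics-Cloud-Output" alias:

```
SELECT
    *
INTO
    [StreamAnalytics-Cloud-Output]
FROM
    [IoTHub-Input] TIMESTAMP BY [@timestamp]
```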
If the test does not produce results, double-check that your Input and Output alias names match those specified in the query: 70 | 71 | ![Azure SAS on Cloud Test](../assets/AzureSASonCloudTest.PNG) 72 | 73 | Next, navigate to the "Overview" section of the new Stream Analytics Job and select "Start" to begin running the job: 74 | 75 | ![Azure SAS on Cloud Start](../assets/AzureSASonCloudStart.PNG) 76 | 77 | After a few minutes, you should see that a new dataset has been created under your workspace at powerbi.microsoft.com: 78 | 79 | ![PowerBI Dataset](../assets/PowerBIDataset.PNG) 80 | 81 | ## Module 5.2 : Import PowerBI Template and publish PowerBI Report 82 | 83 | For the next step, you will need to install the [Power BI Desktop application](https://powerbi.microsoft.com/en-us/desktop/) (requires a modern Windows OS). We will use a pre-supplied template to rapidly produce a dashboard for viewing object detection telemetry. 84 | 85 | To begin, open Power BI Desktop and select "File" => "Import" => "Power BI template": 86 | 87 | ![PowerBI Import](../assets/PowerBIImport.PNG) 88 | 89 | Next, navigate to the source folder of this repo and open the "services\POWER_BI\DeepStream+PowerBI.pbit" template file: 90 | 91 | ![PowerBI Import File](../assets/PowerBIImportFile.PNG) 92 | 93 | You may receive a message indicating that PowerBI is unable to connect to the data source. You can point the template at the newly published dataset by selecting "Transform data" => "Data Source Settings", then selecting the dataset published previously ("Intelligent Video Analytics Dataset"). If you followed the suggested naming convention in the previous module, the data should import and display without issue; otherwise, you may need to reconfigure some of the parameters for the pre-supplied template: 94 | 95 | ![PowerBI Dataset](../assets/PowerBIDatasource.PNG) 96 | 97 | To publish the template, select "Publish", save and name the report "Intelligent Video Analytics Report", then publish to the "Intelligent Video Analytics" workspace. You should receive a prompt indicating success; click the link to open your report in PowerBI online: 98 | 99 | ![PowerBI Publish Report](../assets/PowerBIPublishReport.PNG) 100 | 101 | ## Module 5.3 : Create a live PowerBI Dashboard in PowerBI Online 102 | 103 | You should now see a view of your published report in PowerBI online. Select the "Pin Live" button to pin your report to a live dashboard.
Name the dashboard "Intelligent Video Analytics Dashboard" as shown: 104 | 105 | ![PowerBI Pin Live](../assets/PowerBIPinLive.PNG) 106 | 107 | Navigate to the "Intelligent Video Analytics" workspace and select "Dashboards"; you should see the newly pinned live dashboard. Select it: 108 | ![PowerBI Pin Live](../assets/PowerBINewDashboard.PNG) 109 | 110 | Notice that there is now a section to "Ask a question about your data": 111 | ![PowerBI Ask Question](../assets/PowerBIAskQuestion.PNG) 112 | 113 | Try some of these examples: 114 | ``` 115 | Max of count person in last minute by sensor id 116 | MAX of count car in yard in last minute 117 | AVG count of car in Street in last minute 118 | LAST person by sensorid 119 | Pie Chart count of person by sensorid in last day (@timestamp) 120 | ``` 121 | 122 | For each example, feel free to modify the [object] value. Once you have obtained a desirable result, you can pin the visual to your dashboard: 123 | 124 | ![PowerBI Pin Visual](../assets/PowerBIPinViz.PNG) 125 | 126 | Repeat this process until you have a desirable result: 127 | 128 | ![PowerBI Pinned](../assets/PowerBIPinned.PNG) 129 | 130 | ## Module 5.4 : Add a Streaming Data Tile to PowerBI Dashboard in PowerBI Online 131 | 132 | Next, we will add a Streaming data tile by selecting "Add tile" => "Custom Streaming Data": 133 | 134 | ![PowerBI Tile](../assets/PowerBITile.PNG) 135 | 136 | Select the "Intelligent Video Analytics Dataset", then "Next": 137 | 138 | ![PowerBI Tile Dataset](../assets/PowerBITileDataset.PNG) 139 | 140 | On the next screen, set "Visualization Type" to "Line chart", then configure the "Axis", "Legend", and "Values" as shown, then select "Apply": 141 | 142 | ![PowerBI Tile Viz](../assets/PowerBITileViz.PNG) 143 | 144 | Your new visualization will now appear on the Dashboard and begin plotting detection telemetry in near real-time (approximately every 15 seconds if using the default Stream Analytics on Edge Job): 145 | ![PowerBI Tile Viz name](../assets/PowerBITileVizName.PNG) 146 | 147 | Repeat this process until you have a desirable result. 148 | 149 | ## Module 5.5 : Next Steps 150 | 151 | Congratulations! At this point, you have developed a full end-to-end Intelligent Video Analytics pipeline to report and visualize object detection data in PowerBI!
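If you would like to adjust the roughly 15 second refresh cadence noted above, it is governed by the tumbling windows in the default edge job, [DeepStreamAnalytics.sql](../services/AZURE_STREAMING_ANALYTICS/Edge/DeepStreamAnalytics.sql); the final summarizing step of that query is excerpted here:

```
SELECT
    FLOOR(AVG(count)) as count, /*Smoothing function*/
    sensorId, object, System.Timestamp AS [@timestamp]
INTO [SummarizedDetections]
FROM AggregatedDetections
GROUP BY
    sensorId,
    object,
    TumblingWindow(second, 15)
```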
If you are curious about additional techniques to apply to your PowerBI dashboard, check out these resources: 152 | 153 | * [PowerBI Documentation](https://docs.microsoft.com/en-us/power-bi/) 154 | * [PowerBI Themes Gallery](https://community.powerbi.com/t5/Themes-Gallery/bd-p/ThemesGallery) -------------------------------------------------------------------------------- /modules/IoTCentralBridge/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | *.pid.lock 13 | 14 | # Directory for instrumented libs generated by jscoverage/JSCover 15 | lib-cov 16 | 17 | # Coverage directory used by tools like istanbul 18 | coverage 19 | 20 | # nyc test coverage 21 | .nyc_output 22 | 23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 24 | .grunt 25 | 26 | # Bower dependency directory (https://bower.io/) 27 | bower_components 28 | 29 | # node-waf configuration 30 | .lock-wscript 31 | 32 | # Compiled binary addons (http://nodejs.org/api/addons.html) 33 | build/Release 34 | 35 | # Dependency directories 36 | node_modules/ 37 | jspm_packages/ 38 | 39 | # Typescript v1 declaration files 40 | typings/ 41 | 42 | # Optional npm cache directory 43 | .npm 44 | 45 | # Optional eslint cache 46 | .eslintcache 47 | 48 | # Optional REPL history 49 | .node_repl_history 50 | 51 | # Output of 'npm pack' 52 | *.tgz 53 | 54 | # Yarn Integrity file 55 | .yarn-integrity 56 | 57 | # dotenv environment variables file 58 | .env 59 | 60 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/arm64v8.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM arm64v8/node:13-slim 2 | 3 | RUN apt-get update && apt-get install -y --no-install-recommends \ 4 | net-tools \ 5 | unzip \ 6 | systemd-sysv \ 7 | && rm -rf /var/lib/apt/lists/* 8 | 9 | ENV WORKINGDIR /app 10 | WORKDIR ${WORKINGDIR} 11 | 12 | ADD package.json ${WORKINGDIR}/package.json 13 | ADD tslint.json ${WORKINGDIR}/tslint.json 14 | ADD tsconfig.json ${WORKINGDIR}/tsconfig.json 15 | ADD src ${WORKINGDIR}/src 16 | 17 | RUN npm install -q && \ 18 | ./node_modules/typescript/bin/tsc -p . 
&& \ 19 | ./node_modules/tslint/bin/tslint -p ./tsconfig.json && \ 20 | npm prune --production && \ 21 | rm -f tslint.json && \ 22 | rm -f tsconfig.json && \ 23 | rm -rf src 24 | 25 | # HEALTHCHECK \ 26 | # --interval=30s \ 27 | # --timeout=30s \ 28 | # --start-period=60s \ 29 | # --retries=3 \ 30 | # CMD curl -f http://localhost:9014/health || exit 1 31 | 32 | EXPOSE 9014 33 | 34 | CMD ["node", "./dist/index"] 35 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/module.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-version": "0.0.1", 3 | "description": "", 4 | "image": { 5 | "repository": "toolboc/iotcentralbridge", 6 | "tag": { 7 | "version": "0.1.0", 8 | "platforms": { 9 | "arm64v8": "./arm64v8.Dockerfile" 10 | } 11 | }, 12 | "buildOptions": [] 13 | }, 14 | "language": "javascript" 15 | } 16 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "iot-central-bridge", 3 | "version": "0.0.50", 4 | "description": "NVIDIA Jetson Nano IoT Central module", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "node ./node_modules/jest/bin/jest.js", 8 | "tslint": "node ./node_modules/tslint/bin/tslint -p ./tsconfig.json", 9 | "build": "node ./node_modules/typescript/bin/tsc -p .", 10 | "dockerbuild": "node ./scripts/dockerBuild.js --docker-build", 11 | "dockerpush": "node ./scripts/dockerBuild.js --docker-push", 12 | "preversion": "npm test", 13 | "version": "node ./scripts/dockerBuild.js --docker-build", 14 | "postversion": "node ./scripts/dockerBuild.js --docker-push", 15 | "postinstall": "node ./scripts/setupDevEnvironment.js" 16 | }, 17 | "author": "sseiber", 18 | "license": "MIT", 19 | "repository": { 20 | "type": "git", 21 | "url": "git@github.com:sseiber/nvidia-nano-airlift.git" 22 | }, 23 | "dependencies": { 24 | "@hapi/boom": "^9.0.0", 25 | "@hapi/good": "^9.0.0", 26 | "@hapi/good-console": "^9.0.0", 27 | "@hapi/good-squeeze": "^6.0.0", 28 | "@hapi/hapi": "^19.0.2", 29 | "azure-iot-device": "^1.12.1", 30 | "azure-iot-device-mqtt": "^1.11.1", 31 | "fs-extra": "^8.1.0", 32 | "ip": "^1.1.5", 33 | "lodash.defaults": "^4.2.0", 34 | "lodash.get": "^4.4.2", 35 | "lodash.random": "^3.2.0", 36 | "lodash.set": "^4.3.2", 37 | "nconf": "^0.10.0", 38 | "pjson": "^1.0.9", 39 | "query-string": "^6.9.0", 40 | "request": "^2.88.0", 41 | "rimraf": "^3.0.0", 42 | "spryly": "^1.0.19", 43 | "uuid": "^3.3.3" 44 | }, 45 | "devDependencies": { 46 | "@types/azure": "^0.9.20", 47 | "@types/jest": "^24.0.25", 48 | "@types/nconf": "0.10.0", 49 | "@types/node": "^13.1.4", 50 | "@types/request": "^2.48.4", 51 | "jest": "^24.9.0", 52 | "ts-jest": "^24.3.0", 53 | "tslint": "^5.20.1", 54 | "typescript": "^3.7.4" 55 | }, 56 | "jest": { 57 | "rootDir": "./src/", 58 | "testRegex": "(/__tests__/.*|\\.(test|spec))\\.(ts|tsx)$", 59 | "moduleFileExtensions": [ 60 | "ts", 61 | "tsx", 62 | "js", 63 | "json" 64 | ], 65 | "transform": { 66 | ".tsx?": "ts-jest" 67 | }, 68 | "moduleNameMapper": { 69 | "\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga)$": "./__mocks__/fileMock.js" 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/apis/health.ts: -------------------------------------------------------------------------------- 1 | 
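// Exposes GET /health for liveness probes, returning the HealthState value
// computed by HealthService with an HTTP 200. The (commented-out) Dockerfile
// HEALTHCHECK curls this endpoint; when FORCE_HEALTHCHECK=1 is set in the
// deployment manifest, services/health.ts performs the equivalent check in-process.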
import { inject, RoutePlugin, route } from 'spryly'; 2 | import { Request, ResponseToolkit } from '@hapi/hapi'; 3 | import { HealthService } from '../services/health'; 4 | import { 5 | badRequest as boom_badRequest 6 | } from '@hapi/boom'; 7 | import * as _get from 'lodash.get'; 8 | 9 | export class HealthRoutes extends RoutePlugin { 10 | @inject('health') 11 | private health: HealthService; 12 | 13 | @route({ 14 | method: 'GET', 15 | path: '/health', 16 | options: { 17 | tags: ['health'], 18 | description: 'Health status', 19 | auth: false 20 | } 21 | }) 22 | // @ts-ignore (request) 23 | public async getHealth(request: Request, h: ResponseToolkit) { 24 | try { 25 | const healthState = await this.health.checkHealthState(); 26 | 27 | return h.response(`HealthState: ${healthState}`).code(200); 28 | } 29 | catch (ex) { 30 | throw boom_badRequest(ex.message); 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/apis/index.ts: -------------------------------------------------------------------------------- 1 | import { HealthRoutes } from './health'; 2 | import { ModuleRoutes } from './module'; 3 | 4 | export default [ 5 | HealthRoutes, 6 | ModuleRoutes 7 | ]; 8 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/apis/module.ts: -------------------------------------------------------------------------------- 1 | import { inject, RoutePlugin, route } from 'spryly'; 2 | import { Request, ResponseToolkit } from '@hapi/hapi'; 3 | import { ModuleService } from '../services/module'; 4 | import { 5 | badRequest as boom_badRequest 6 | } from '@hapi/boom'; 7 | import * as _get from 'lodash.get'; 8 | 9 | export class ModuleRoutes extends RoutePlugin { 10 | @inject('module') 11 | private module: ModuleService; 12 | 13 | // Sample route capability provided by this RESTful service 14 | // Documentation: 15 | // https://hapi.dev/api/?v=18.4.0 16 | 17 | @route({ 18 | method: 'POST', 19 | path: '/api/v1/module/sample1', 20 | options: { 21 | tags: ['module'], 22 | description: 'route1 example' 23 | } 24 | }) 25 | // @ts-ignore (request) 26 | public async postSample1(request: Request, h: ResponseToolkit) { 27 | try { 28 | const testparam = _get(request, 'payload.testparam'); 29 | 30 | const result = await this.module.sample1(testparam); 31 | 32 | return h.response(result).code(201); 33 | } 34 | catch (ex) { 35 | throw boom_badRequest(ex.message); 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/index.ts: -------------------------------------------------------------------------------- 1 | import { manifest } from './manifest'; 2 | import { compose, ComposeOptions } from 'spryly'; 3 | import { 4 | type as osType, 5 | cpus as osCpus, 6 | freemem as osFreeMem, 7 | totalmem as osTotalMem 8 | } from 'os'; 9 | import { forget } from './utils'; 10 | 11 | const composeOptions: ComposeOptions = { 12 | relativeTo: __dirname, 13 | logger: (t, m) => { 14 | const tags = ((t && Array.isArray(t)) ? `[opt,${t.join(',')}]` : '[opt]'); 15 | 16 | // tslint:disable-next-line:no-console 17 | console.log(`[${new Date().toTimeString()}] ${tags} ${m}`); 18 | } 19 | }; 20 | 21 | // process.on('unhandledRejection', (e: any) => { 22 | // // tslint:disable:no-console 23 | // console.log(['startup', 'error'], `Exception on startup...
${e.message}`); 24 | // console.log(['startup', 'error'], e.stack); 25 | // // tslint:enable:no-console 26 | // }); 27 | 28 | async function start() { 29 | try { 30 | const server = await compose(manifest(), composeOptions); 31 | 32 | server.log(['startup', 'info'], `🚀 Starting HAPI server instance...`); 33 | 34 | await server.start(); 35 | 36 | server.log(['startup', 'info'], `✅ Core server started`); 37 | server.log(['startup', 'info'], `🌎 ${server.info.uri}`); 38 | server.log(['startup', 'info'], ` > Hapi version: ${server.version}`); 39 | server.log(['startup', 'info'], ` > Plugins: [${Object.keys(server.registrations).join(', ')}]`); 40 | server.log(['startup', 'info'], ` > Machine: ${osType()}, ${osCpus().length} core, ` + 41 | `freemem=${(osFreeMem() / 1024 / 1024).toFixed(0)}mb, totalmem=${(osTotalMem() / 1024 / 1024).toFixed(0)}mb`); 42 | 43 | server.log(['startup', 'info'], `👨‍💻 Starting IoT Central provisioning`); 44 | await (server.methods.iotCentral as any).connectToIoTCentral(); 45 | server.log(['startup', 'info'], `👩‍💻 Finished IoT Central provisioning`); 46 | 47 | server.log(['startup', 'info'], `📷 Starting runtime initialization`); 48 | await (server.methods.module as any).startService(); 49 | server.log(['startup', 'info'], `📸 Finished runtime initialization`); 50 | } 51 | catch (error) { 52 | // tslint:disable-next-line:no-console 53 | console.log(`['startup', 'error'], 👹 Error starting server: ${error.message}`); 54 | } 55 | } 56 | 57 | forget(start); 58 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/manifest.ts: -------------------------------------------------------------------------------- 1 | import { ComposeManifest } from 'spryly'; 2 | import { resolve as pathResolve } from 'path'; 3 | 4 | const DefaultPort = 9014; 5 | const PORT = process.env.PORT || process.env.port || process.env.PORT0 || process.env.port0 || DefaultPort; 6 | 7 | export function manifest(config?: any): ComposeManifest { 8 | return { 9 | server: { 10 | port: PORT, 11 | app: { 12 | rootDirectory: pathResolve(__dirname, '..'), 13 | storageRootDirectory: process.env.DATAMISC_ROOT || '/data/misc/storage', 14 | slogan: 'NVIDIA Jetson Nano local service' 15 | } 16 | }, 17 | services: [ 18 | './services' 19 | ], 20 | plugins: [ 21 | ...[ 22 | { 23 | plugin: '@hapi/good', 24 | options: generateLoggingOptions(config) 25 | } 26 | ], 27 | // ...[ 28 | // { 29 | // plugin: './plugins' 30 | // } 31 | // ], 32 | ...[ 33 | { 34 | plugin: './apis' 35 | } 36 | ] 37 | ] 38 | }; 39 | } 40 | 41 | // @ts-ignore (config) 42 | function generateLoggingOptions(config: any) { 43 | return { 44 | ops: { 45 | interval: 1000 46 | }, 47 | reporters: { 48 | console: [ 49 | { 50 | module: '@hapi/good-squeeze', 51 | name: 'Squeeze', 52 | args: [ 53 | { 54 | log: '*', 55 | response: '*', 56 | request: '*', 57 | error: '*' 58 | } 59 | ] 60 | }, 61 | { 62 | module: '@hapi/good-console', 63 | args: [ 64 | { 65 | format: '[[]hh:mm:ss [GMT]ZZ[]]', 66 | utc: false 67 | } 68 | ] 69 | }, 70 | 'stdout' 71 | ] 72 | } 73 | }; 74 | } 75 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/services/__tests__/module.ts: -------------------------------------------------------------------------------- 1 | import { ModuleService } from '../module'; 2 | 3 | it('should be constructed', () => { 4 | const testInstance = new ModuleService(); 5 | expect(testInstance).toBeDefined(); 6 | }); 7 |
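// A second smoke test, kept deliberately minimal: services/health.ts calls
// module.getHealth(), so the method should at least exist on a fresh instance.
it('should expose a getHealth method', () => {
    const testInstance = new ModuleService();
    expect(typeof testInstance.getHealth).toBe('function');
});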
-------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/services/config.ts: -------------------------------------------------------------------------------- 1 | import { service } from 'spryly'; 2 | import * as nconf from 'nconf'; 3 | 4 | @service('config') 5 | export class ConfigService { 6 | private config: nconf.Provider; 7 | 8 | public async init() { 9 | this.config = nconf.env().file(`./configs/${process.env.NODE_ENV}.json`); 10 | } 11 | 12 | public get(key: string): any { 13 | return this.config.get(key); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/services/device.ts: -------------------------------------------------------------------------------- 1 | import { inject, service } from 'spryly'; 2 | import { Server } from '@hapi/hapi'; 3 | import { ConfigService } from './config'; 4 | import { LoggingService } from './logging'; 5 | import { 6 | IoTCentralService, 7 | ModuleInfoFieldIds, 8 | ModuleState, 9 | PipelineState 10 | } from './iotCentral'; 11 | import { promisify } from 'util'; 12 | import { exec } from 'child_process'; 13 | import * as request from 'request'; 14 | import * as _get from 'lodash.get'; 15 | import { bind } from '../utils'; 16 | 17 | const defaultDockerApiVersion: string = '1.37'; 18 | const defaultDockerSocket: string = '/var/run/docker.sock'; 19 | const defaultDeepStreamModuleName: string = 'deepstream'; 20 | 21 | @service('device') 22 | export class DeviceService { 23 | @inject('$server') 24 | private server: Server; 25 | 26 | @inject('config') 27 | private config: ConfigService; 28 | 29 | @inject('logger') 30 | private logger: LoggingService; 31 | 32 | @inject('iotCentral') 33 | private iotCentral: IoTCentralService; 34 | 35 | private dockerApiVersion: string = defaultDockerApiVersion; 36 | private dockerSocket: string = defaultDockerSocket; 37 | private deepStreamModuleName: string = defaultDeepStreamModuleName; 38 | 39 | public async init(): Promise<void> { 40 | this.logger.log(['DeviceService', 'info'], 'initialize'); 41 | 42 | this.server.method({ name: 'device.restartDeepStream', method: this.restartDeepStream }); 43 | this.server.method({ name: 'device.restartDevice', method: this.restartDevice }); 44 | this.server.method({ name: 'device.restartDockerImage', method: this.restartDockerImage }); 45 | 46 | this.dockerApiVersion = this.config.get('dockerApiVersion') || defaultDockerApiVersion; 47 | this.dockerSocket = this.config.get('dockerSocket') || defaultDockerSocket; 48 | this.deepStreamModuleName = this.config.get('deepStreamModuleName') || defaultDeepStreamModuleName; 49 | } 50 | 51 | @bind 52 | public async restartDeepStream(): Promise<void> { 53 | await this.iotCentral.sendMeasurement({ 54 | [ModuleInfoFieldIds.Event.VideoStreamProcessingStopped]: 'NVIDIA DeepStream', 55 | [ModuleInfoFieldIds.State.ModuleState]: ModuleState.Inactive 56 | }); 57 | 58 | await this.iotCentral.setPipelineState(PipelineState.Inactive); 59 | 60 | return this.restartDockerImage(); 61 | } 62 | 63 | @bind 64 | public async restartDevice(timeout: number, reason: string): Promise<void> { 65 | this.logger.log(['DeviceService', 'info'], `Module restart requested...`); 66 | if (_get(process.env, 'LOCAL_DEBUG') === '1') { 67 | return; 68 | } 69 | 70 | try { 71 | await this.iotCentral.sendMeasurement({ 72 | [ModuleInfoFieldIds.Event.DeviceRestart]: reason, 73 | [ModuleInfoFieldIds.Event.VideoStreamProcessingStopped]: 'NVIDIA DeepStream', 74 |
[ModuleInfoFieldIds.State.ModuleState]: ModuleState.Inactive 75 | }); 76 | 77 | await this.iotCentral.setPipelineState(PipelineState.Inactive); 78 | 79 | if (timeout > 0) { 80 | await new Promise((resolve) => { 81 | setTimeout(() => { 82 | return resolve(); 83 | }, 1000 * timeout); 84 | }); 85 | } 86 | 87 | await promisify(exec)(`reboot --reboot`); 88 | } 89 | catch (ex) { 90 | this.logger.log(['DeviceService', 'error'], `Failed to auto-restart device - will exit container now: ${ex.message}`); 91 | } 92 | 93 | // let Docker restart our container 94 | process.exit(1); 95 | } 96 | 97 | @bind 98 | private async restartDockerImage(containerName?: string): Promise<any> { 99 | this.logger.log(['DeviceService', 'info'], `Restarting DeepStream container...`); 100 | 101 | const options = { 102 | method: 'POST', 103 | socketPath: this.dockerSocket, 104 | uri: `http://v${this.dockerApiVersion}/containers/${containerName || this.deepStreamModuleName}/restart`, 105 | json: true 106 | }; 107 | 108 | return this.dockerRequest(options); 109 | } 110 | 111 | private dockerRequest(options: any): Promise<any> { 112 | return new Promise((resolve, reject) => { 113 | request(options, (requestError, response, body) => { 114 | if (requestError) { 115 | this.logger.log(['DeviceService', 'error', 'dockerRequest'], `dockerRequest error: ${requestError.message}`); 116 | return reject(requestError); 117 | } 118 | 119 | if (response.statusCode < 200 || response.statusCode > 299) { 120 | this.logger.log(['DeviceService', 'error', 'dockerRequest'], `Response status code = ${response.statusCode}`); 121 | 122 | const errorMessage = body.message || body || 'An error occurred'; 123 | return reject(new Error(`Error statusCode: ${response.statusCode}, ${errorMessage}`)); 124 | } 125 | 126 | return resolve(body); 127 | }); 128 | }); 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/services/health.ts: -------------------------------------------------------------------------------- 1 | import { service, inject } from 'spryly'; 2 | import { Server } from '@hapi/hapi'; 3 | import { LoggingService } from './logging'; 4 | import { ModuleService } from './module'; 5 | import { DeviceService } from './device'; 6 | import * as _get from 'lodash.get'; 7 | import { bind } from '../utils'; 8 | 9 | export const healthCheckInterval = 15; 10 | // const healthCheckTimeout = 30; 11 | const healthCheckStartPeriod = 60; 12 | const healthCheckRetries = 3; 13 | 14 | export const HealthState = { 15 | Good: 1, 16 | Warning: 0, 17 | Critical: 0 18 | }; 19 | 20 | @service('health') 21 | export class HealthService { 22 | @inject('$server') 23 | private server: Server; 24 | 25 | @inject('logger') 26 | private logger: LoggingService; 27 | 28 | @inject('module') 29 | private module: ModuleService; 30 | 31 | @inject('device') 32 | private device: DeviceService; 33 | 34 | private heathCheckStartTime = Date.now(); 35 | private failingStreak = 1; 36 | 37 | public async init() { 38 | this.logger.log(['HealthService', 'info'], 'initialize'); 39 | 40 | // Workaround: 41 | // IoT Edge runtime 1.0.7.x has an incompatibility with Dockerfile HEALTHCHECK configurations 42 | // Microsoft Vision AI Dev Kit firmware version v0.4940_Perf uses IoT Edge runtime version 1.0.7.x 43 | // Newer versions of the Dev Kit should contain IoT Edge runtime 1.0.8+ which contains a fix for 44 | // this issue.
On those versions you can uncomment the HEALTHCHECK configuration in the Dockerfile 45 | // and rebuild this container and remove the FORCE_HEALTHCHECK environment variable in your 46 | // IoT Edge deployment manifest. 47 | if (_get(process.env, 'LOCAL_DEBUG') === '1' || _get(process.env, 'FORCE_HEALTHCHECK') === '1') { 48 | setInterval(async () => { 49 | const cameraHealth = await this.checkHealthState(); 50 | 51 | if (cameraHealth < HealthState.Good) { 52 | if ((Date.now() - this.heathCheckStartTime) > (1000 * healthCheckStartPeriod) && ++this.failingStreak >= healthCheckRetries) { 53 | await (this.server.methods.device as any).restartDevice(10, 'HealthService:checkHealthState'); 54 | } 55 | } 56 | else { 57 | this.heathCheckStartTime = Date.now(); 58 | this.failingStreak = 0; 59 | } 60 | }, (1000 * healthCheckInterval)); 61 | } 62 | } 63 | 64 | @bind 65 | public async checkHealthState(): Promise<number> { 66 | this.logger.log(['HealthService', 'info'], 'Health check interval'); 67 | 68 | const moduleHealth = await this.module.getHealth(); 69 | 70 | if (moduleHealth < HealthState.Good) { 71 | this.logger.log(['HealthService', 'warning'], `Health check watch: module:${moduleHealth}`); 72 | 73 | if ((Date.now() - this.heathCheckStartTime) > (1000 * healthCheckStartPeriod) && ++this.failingStreak >= healthCheckRetries) { 74 | await this.device.restartDevice(10, 'HealthService:checkHealthState'); 75 | } 76 | } 77 | 78 | return moduleHealth; 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/services/index.ts: -------------------------------------------------------------------------------- 1 | 2 | import { HealthService } from './health'; 3 | import { ConfigService } from './config'; 4 | import { LoggingService } from './logging'; 5 | import { StorageService } from './storage'; 6 | import { StateService } from './state'; 7 | import { ModuleService } from './module'; 8 | import { DeviceService } from './device'; 9 | import { IoTCentralService } from './iotCentral'; 10 | 11 | export default [ 12 | HealthService, 13 | ConfigService, 14 | LoggingService, 15 | StorageService, 16 | StateService, 17 | ModuleService, 18 | DeviceService, 19 | IoTCentralService 20 | ]; 21 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/services/logging.ts: -------------------------------------------------------------------------------- 1 | import { service, inject } from 'spryly'; 2 | import { Server } from '@hapi/hapi'; 3 | 4 | @service('logger') 5 | export class LoggingService { 6 | @inject('$server') 7 | private server: Server; 8 | 9 | public async init(): Promise<void> { 10 | // tslint:disable-next-line:no-console 11 | console.log(`[${new Date().toTimeString()}] [LoggingService, info] initialize`); 12 | } 13 | 14 | public log(tags: any, message: any) { 15 | const tagsMessage = (tags && Array.isArray(tags)) ?
`[${tags.join(', ')}]` : '[]'; 16 | 17 | if (!(this.server.settings.app as any).compositionDone) { 18 | // tslint:disable-next-line:no-console 19 | console.log(`[${new Date().toTimeString()}] [${tagsMessage}] ${message}`); 20 | } 21 | else { 22 | this.server.log(tagsMessage, message); 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/services/state.ts: -------------------------------------------------------------------------------- 1 | import { service, inject } from 'spryly'; 2 | import { LoggingService } from './logging'; 3 | import { ConfigService } from './config'; 4 | import { StorageService } from './storage'; 5 | import * as _get from 'lodash.get'; 6 | import { v4 as uuidV4 } from 'uuid'; 7 | 8 | @service('state') 9 | export class StateService { 10 | @inject('logger') 11 | private logger: LoggingService; 12 | 13 | @inject('config') 14 | private config: ConfigService; 15 | 16 | @inject('storage') 17 | private storage: StorageService; 18 | 19 | private stateInternal: any; 20 | private stateFile; 21 | 22 | public get system(): any { 23 | return this.stateInternal.system || {}; 24 | } 25 | 26 | public get iotCentral(): any { 27 | return this.stateInternal.iotCentral || {}; 28 | } 29 | 30 | public async init() { 31 | this.logger.log(['StateService', 'info'], 'initialize'); 32 | 33 | this.stateFile = this.config.get('systemName') ? `${this.config.get('systemName')}-state` : 'state'; 34 | 35 | await this.loadState(); 36 | 37 | if (!this.stateInternal.system.systemName) { 38 | this.stateInternal.system.systemName = uuidV4(); 39 | } 40 | 41 | if (!this.stateInternal.system.systemId) { 42 | this.stateInternal.system.systemId = uuidV4(); 43 | } 44 | 45 | await this.flushState(); 46 | } 47 | 48 | public async setIotCentralProperty(property: string, value: any) { 49 | this.stateInternal.iotCentral[property] = value; 50 | 51 | await this.flushState(); 52 | } 53 | 54 | private async loadState() { 55 | try { 56 | this.stateInternal = await this.storage.get(this.stateFile); 57 | if (!this.stateInternal) { 58 | this.stateInternal = { 59 | system: { 60 | systemName: '', 61 | systemId: '' 62 | } 63 | }; 64 | } 65 | 66 | await this.flushState(); 67 | } 68 | catch (ex) { 69 | this.logger.log(['loadState', 'error'], ex.message); 70 | 71 | // eat exceptions 72 | } 73 | } 74 | 75 | private async flushState() { 76 | try { 77 | await this.storage.flush(this.stateFile, this.stateInternal as any); 78 | } 79 | catch (ex) { 80 | this.logger.log(['flushState', 'error'], ex.message); 81 | 82 | // eat exceptions 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/services/storage.ts: -------------------------------------------------------------------------------- 1 | const ROOT = '__ROOT__'; 2 | import { service, inject } from 'spryly'; 3 | import { Server } from '@hapi/hapi'; 4 | import { LoggingService } from './logging'; 5 | import * as fse from 'fs-extra'; 6 | import { resolve as pathResolve } from 'path'; 7 | import * as _get from 'lodash.get'; 8 | import * as _set from 'lodash.set'; 9 | 10 | @service('storage') 11 | export class StorageService { 12 | @inject('$server') 13 | private server: Server; 14 | 15 | @inject('logger') 16 | private logger: LoggingService; 17 | 18 | private setupDone = false; 19 | private storageDirectory; 20 | 21 | public async init() { 22 | this.logger.log(['StorageService', 'info'], 'initialize'); 23 | 24 |
this.storageDirectory = (this.server.settings.app as any).dataMiscRootDirectory; 25 | 26 | await this.setup(); 27 | } 28 | 29 | public async get(scope: string, property?: string): Promise<any> { 30 | if (!property) { 31 | property = ROOT; 32 | } 33 | 34 | const obj = await this.readScope(scope); 35 | 36 | if (!obj) { 37 | return null; 38 | } 39 | 40 | if (property === ROOT) { 41 | return obj; 42 | } 43 | 44 | return _get(obj, property); 45 | } 46 | 47 | public async set(scope: string, property: any, value?: any) { 48 | if (!value) { 49 | value = property; 50 | property = ROOT; 51 | } 52 | 53 | const obj = await this.readScope(scope); 54 | 55 | const finalObject = (property === ROOT) 56 | ? value 57 | : _set(obj || {}, property, value); 58 | 59 | return this.writeScope(scope, finalObject); 60 | } 61 | 62 | public async flush(scope: string, property: string, value?: any) { 63 | if (!value) { 64 | value = property; 65 | property = ROOT; 66 | } 67 | 68 | const finalObject = (property === ROOT) 69 | ? value 70 | : _set({}, property, value); 71 | 72 | return this.writeScope(scope, finalObject); 73 | } 74 | 75 | private async setup() { 76 | if (this.setupDone === true) { 77 | return; 78 | } 79 | 80 | await fse.ensureDir(this.storageDirectory); 81 | 82 | this.setupDone = true; 83 | } 84 | 85 | // TODO: 86 | // read/write scope and file tests may need to be synchronous 87 | private async readScope(scope): Promise<any> { 88 | try { 89 | await this.setup(); 90 | 91 | const exists = await fse.pathExists(this.getScopePath(scope)); 92 | if (!exists) { 93 | return null; 94 | } 95 | 96 | return fse.readJson(this.getScopePath(scope)); 97 | } 98 | catch (error) { 99 | return null; 100 | } 101 | } 102 | 103 | private async writeScope(scope, data) { 104 | await this.setup(); 105 | 106 | const writeOptions = { 107 | spaces: 2, 108 | throws: false 109 | }; 110 | 111 | return fse.writeJson(this.getScopePath(scope), data, writeOptions); 112 | } 113 | 114 | private getScopePath(scope) { 115 | return pathResolve(this.storageDirectory, `${scope}.json`); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/utils/bind.ts: -------------------------------------------------------------------------------- 1 | // @ts-ignore 2 | // tslint:disable-next-line:ban-types 3 | export function bind<T extends Function>(target: object, propertyKey: string, descriptor: TypedPropertyDescriptor<T>): TypedPropertyDescriptor<T> { 4 | if (!descriptor || (typeof descriptor.value !== 'function')) { 5 | throw new TypeError(`Only methods can be decorated with @bind.
<${propertyKey}> is not a method!`); 6 | } 7 | 8 | return { 9 | configurable: true, 10 | get(this: T): T { 11 | const bound: T = descriptor.value!.bind(this); 12 | Object.defineProperty(this, propertyKey, { 13 | value: bound, 14 | configurable: true, 15 | writable: true 16 | }); 17 | return bound; 18 | } 19 | }; 20 | } 21 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/utils/defer.ts: -------------------------------------------------------------------------------- 1 | class DeferredPromise { 2 | public then: any; 3 | public catch: any; 4 | public resolve: any; 5 | public reject: any; 6 | private promiseInternal: any; 7 | 8 | public constructor() { 9 | this.promiseInternal = new Promise((resolve, reject) => { 10 | this.resolve = resolve; 11 | this.reject = reject; 12 | }); 13 | this.then = this.promiseInternal.then.bind(this.promiseInternal); 14 | this.catch = this.promiseInternal.catch.bind(this.promiseInternal); 15 | } 16 | 17 | public get promise() { 18 | return this.promiseInternal; 19 | } 20 | } 21 | 22 | export function defer() { 23 | return new DeferredPromise(); 24 | } 25 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/utils/emptyObj.ts: -------------------------------------------------------------------------------- 1 | export function emptyObj(object: any) { 2 | if (!object) { 3 | return false; 4 | } 5 | 6 | for (const key in object) { 7 | if (object.hasOwnProperty(key)) { 8 | return false; 9 | } 10 | } 11 | 12 | return true; 13 | } 14 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/utils/forget.ts: -------------------------------------------------------------------------------- 1 | export function forget(fireAndForgetAsyncFunc: any, ...params) { 2 | (async () => { 3 | await fireAndForgetAsyncFunc(...params); 4 | })().catch(); 5 | } 6 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/utils/index.ts: -------------------------------------------------------------------------------- 1 | import { bind } from './bind'; 2 | import { sleep } from './sleep'; 3 | import { forget } from './forget'; 4 | import { pjson } from './pjson'; 5 | import { emptyObj } from './emptyObj'; 6 | import { defer } from './defer'; 7 | 8 | export { 9 | bind, 10 | sleep, 11 | forget, 12 | pjson, 13 | emptyObj, 14 | defer 15 | }; 16 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/utils/pjson.ts: -------------------------------------------------------------------------------- 1 | import * as fse from 'fs-extra'; 2 | import { resolve } from 'path'; 3 | 4 | export function pjson(): any { 5 | let result = {}; 6 | 7 | try { 8 | const packagePath = resolve(__dirname, '..', '..', 'package.json'); 9 | result = fse.readJSONSync(packagePath); 10 | } 11 | catch (ex) { 12 | // eat exception 13 | } 14 | 15 | return result; 16 | } 17 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/src/utils/sleep.ts: -------------------------------------------------------------------------------- 1 | export function sleep(milliseconds: number): Promise<void> { 2 | return new Promise((resolve) => { 3 | setTimeout(() => { 4 | return resolve(); 5 | }, milliseconds); 6 | }); 7 | } 8 | --------------------------------------------------------------------------------
/modules/IoTCentralBridge/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "outDir": "./dist", 4 | "sourceMap": true, 5 | "moduleResolution": "node", 6 | "module": "commonjs", 7 | "target": "es2016", 8 | "jsx": "react", 9 | "noImplicitAny": false, 10 | "noUnusedLocals": true, 11 | "noUnusedParameters": true, 12 | "experimentalDecorators": true, 13 | "emitDecoratorMetadata": true, 14 | "typeRoots": [ 15 | "node_modules/@types" 16 | ], 17 | "lib": [ 18 | "es2016", 19 | "dom" 20 | ] 21 | }, 22 | "include": [ 23 | "./src/**/*.ts" 24 | ], 25 | "exclude": [ 26 | "node_modules", 27 | "scripts", 28 | "storage" 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /modules/IoTCentralBridge/tslint.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "tslint:latest" 4 | ], 5 | "rules": { 6 | "no-floating-promises": true, 7 | "no-unused-expression": true, 8 | "no-duplicate-variable": true, 9 | "curly": true, 10 | "class-name": true, 11 | "semicolon": [ 12 | true, 13 | "always" 14 | ], 15 | "triple-equals": true, 16 | "interface-name": [ 17 | false 18 | ], 19 | "max-line-length": [ 20 | true, 21 | 200 22 | ], 23 | "quotemark": [ 24 | true, 25 | "single" 26 | ], 27 | "ordered-imports": [ 28 | false 29 | ], 30 | "object-literal-sort-keys": [ 31 | false 32 | ], 33 | "arrow-parens": [ 34 | false 35 | ], 36 | "max-classes-per-file": [ 37 | false, 38 | 1 39 | ], 40 | "trailing-comma": [ 41 | true, 42 | { 43 | "multiline": "never", 44 | "singleline": "never" 45 | } 46 | ], 47 | "one-line": [ 48 | true, 49 | "check-open-brace" 50 | ], 51 | "no-consecutive-blank-lines": true, 52 | "whitespace": [ 53 | true, 54 | "check-branch", 55 | "check-decl", 56 | "check-operator", 57 | "check-module", 58 | "check-separator", 59 | "check-type", 60 | "check-typecast", 61 | "check-preblock" 62 | ], 63 | "no-irregular-whitespace": true, 64 | "no-trailing-whitespace": true, 65 | "one-variable-per-declaration": [ 66 | true 67 | ], 68 | "prefer-template": true, 69 | "space-before-function-paren": [true, 70 | { 71 | "asyncArrow": "always" 72 | } 73 | ] 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /services/AZURE_STREAMING_ANALYTICS/Cloud/IoTHubToPowerBI.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | * 3 | INTO 4 | [StreamAnalytics-Cloud-Output] 5 | FROM 6 | [IoTHub-Input] TIMESTAMP BY [@timestamp] -------------------------------------------------------------------------------- /services/AZURE_STREAMING_ANALYTICS/Edge/DeepStreamAnalytics.sql: -------------------------------------------------------------------------------- 1 | WITH 2 | FlattenedDetections AS 3 | ( 4 | SELECT 5 | DeepStreamInput.sensorId, 6 | (SUBSTRING (arrayElement.ArrayValue, REGEXMATCH(arrayElement.ArrayValue, '\|[a-z]') + 1, LEN(arrayElement.ArrayValue))) as object, 7 | DeepStreamInput.[@timestamp], COUNT(DeepStreamInput.[@timestamp]) as matches 8 | FROM 9 | [DeepStreamInput] AS DeepStreamInput TIMESTAMP BY DeepStreamInput.[@timestamp] 10 | CROSS APPLY GetArrayElements(objects) AS arrayElement 11 | WHERE 12 | DeepStreamInput.[@timestamp] != '1970-01-01T00:00:00.000Z' /*filter RTSP disconnections*/ 13 | GROUP BY DeepStreamInput.[sensorId], 14 | arrayElement, 15 | DeepStreamInput.[@timestamp], 16 | SYSTEM.TIMESTAMP() 17 | ) 18 | 19 | SELECT 20 | Count(object) AS count, 
/*Counting function*/ 21 | sensorId, object, [@timestamp] 22 | INTO [AggregatedDetections] 23 | FROM FlattenedDetections 24 | WHERE matches = 1 /*Filter duplicates where (timestamp and object) are equal)*/ 25 | GROUP BY 26 | sensorId, 27 | object, 28 | [@timestamp], 29 | TumblingWindow(second, 15) 30 | 31 | SELECT 32 | FLOOR(AVG(count)) as count, /*Smoothing function*/ 33 | sensorId, object, System.Timestamp AS [@timestamp] 34 | INTO [SummarizedDetections] 35 | FROM AggregatedDetections 36 | GROUP BY 37 | sensorId, 38 | object, 39 | TumblingWindow(second, 15) 40 | 41 | -------------------------------------------------------------------------------- /services/AZURE_STREAMING_ANALYTICS/Edge/SampleInput.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "version" : "4.0", 4 | "id" : 4346, 5 | "@timestamp" : "2020-04-29T10:15:21.569Z", 6 | "sensorId" : "Yard", 7 | "objects" : [ 8 | "e-05|410|351|543|465|Car", 9 | "390|410|351|543|465|Car" 10 | ] 11 | }, 12 | { 13 | "version" : "4.0", 14 | "id" : 4348, 15 | "@timestamp" : "1970-01-01T00:00:00.000Z", 16 | "sensorId" : "Yard", 17 | "objects" : [ 18 | "290|409|351|544|465|Car", 19 | "390|410|351|543|465|Car", 20 | "390|410|351|543|465|Person" 21 | ] 22 | }, 23 | { 24 | "version" : "4.0", 25 | "id" : 4351, 26 | "@timestamp" : "2020-04-29T10:15:22.439Z", 27 | "sensorId" : "Yard", 28 | "objects" : [ 29 | "290|410|351|546|461|Car" 30 | ] 31 | }, 32 | { 33 | "version" : "4.0", 34 | "id" : 4352, 35 | "@timestamp" : "2020-04-29T10:15:22.969Z", 36 | "sensorId" : "House", 37 | "objects" : [ 38 | "290|413|354|545|464|Person" 39 | ] 40 | }, 41 | { 42 | "version" : "4.0", 43 | "id" : 4353, 44 | "@timestamp" : "2020-04-29T10:15:23.110Z", 45 | "sensorId" : "House", 46 | "objects" : [ 47 | "290|412|355|548|463|Person", 48 | "390|412|355|548|463|Person", 49 | "490|412|355|548|463|Person" 50 | ] 51 | }, 52 | { 53 | "version" : "4.0", 54 | "id" : 4354, 55 | "@timestamp" : "2020-04-29T10:15:23.559Z", 56 | "sensorId" : "Yard", 57 | "objects" : [ 58 | "290|407|351|547|466|Person" 59 | ] 60 | }, 61 | { 62 | "version" : "4.0", 63 | "id" : 4355, 64 | "@timestamp" : "2020-04-29T10:15:23.624Z", 65 | "sensorId" : "Yard", 66 | "objects" : [ 67 | "290|413|351|546|465|Car" 68 | ] 69 | }, 70 | { 71 | "version" : "4.0", 72 | "id" : 4356, 73 | "@timestamp" : "2020-04-29T10:15:23.713Z", 74 | "sensorId" : "Yard", 75 | "objects" : [ 76 | "290|410|351|550|465|Car" 77 | ] 78 | }, 79 | { 80 | "version" : "4.0", 81 | "id" : 4357, 82 | "@timestamp" : "2020-04-29T10:15:23.779Z", 83 | "sensorId" : "Yard", 84 | "objects" : [ 85 | "290|407|351|551|471|Car" 86 | ] 87 | } 88 | ] -------------------------------------------------------------------------------- /services/CUSTOM_VISION_AI/LICENSE: -------------------------------------------------------------------------------- 1 |  MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /services/CUSTOM_VISION_AI/labels.txt: -------------------------------------------------------------------------------- 1 | car 2 | cat 3 | dog 4 | person -------------------------------------------------------------------------------- /services/CUSTOM_VISION_AI/model.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/CUSTOM_VISION_AI/model.onnx -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream5.0_JetPack4.4/Makefile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 
21 | ################################################################################ 22 | 23 | CUDA_VER?= 24 | ifeq ($(CUDA_VER),) 25 | $(error "CUDA_VER is not set") 26 | endif 27 | CC:= g++ 28 | NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc 29 | 30 | DEEPSTREAM_INCS_PATH:=/opt/nvidia/deepstream/deepstream/sources/includes 31 | NVDSINFER_CUSTOM_IMPL_PATH:=/opt/nvidia/deepstream/deepstream/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo 32 | 33 | CFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations 34 | CFLAGS+= -I../../includes -I/usr/local/cuda-$(CUDA_VER)/include -I $(DEEPSTREAM_INCS_PATH) 35 | 36 | LIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs 37 | LFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group 38 | 39 | INCS:= $(wildcard *.h) 40 | SRCFILES:= nvdsparsebbox_Yolo.cpp \ 41 | $(NVDSINFER_CUSTOM_IMPL_PATH)/trt_utils.cpp \ 42 | $(NVDSINFER_CUSTOM_IMPL_PATH)/kernels.cu 43 | TARGET_LIB:= libnvdsinfer_custom_impl_Yolo.so 44 | 45 | TARGET_OBJS:= $(SRCFILES:.cpp=.o) 46 | TARGET_OBJS:= $(TARGET_OBJS:.cu=.o) 47 | 48 | all: $(TARGET_LIB) 49 | 50 | %.o: %.cpp $(INCS) Makefile 51 | $(CC) -c -o $@ $(CFLAGS) $< 52 | 53 | %.o: %.cu $(INCS) Makefile 54 | $(NVCC) -c -o $@ --compiler-options '-fPIC' $< 55 | 56 | $(TARGET_LIB) : $(TARGET_OBJS) 57 | $(CC) -o $@ $(TARGET_OBJS) $(LFLAGS) 58 | 59 | clean: 60 | rm -rf $(TARGET_LIB) 61 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream5.0_JetPack4.4/README.MD: -------------------------------------------------------------------------------- 1 | ## Instructions to build the CustomVision DeepStream YoloParser from source 2 | 1. Ensure that you have installed the DeepStream 5.0 sources included in the [DeepStream Installer](https://developer.nvidia.com/deepstream-getting-started) 3 | 2. Clean the build environment with: 4 | ``` 5 | make CUDA_VER=10.2 clean 6 | ``` 7 | 3. Build from source with: 8 | ``` 9 | make CUDA_VER=10.2 10 | ``` -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream5.0_JetPack4.4/libnvdsinfer_custom_impl_Yolo.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/DEEPSTREAM/YoloParser/CustomVision_DeepStream5.0_JetPack4.4/libnvdsinfer_custom_impl_Yolo.so -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream5.0_JetPack4.4/nvdsparsebbox_Yolo.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software.
13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #include "nvdsinfer_custom_impl.h" 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | /*static const int NUM_CLASSES_YOLO = 2;*/ 33 | 34 | extern "C" bool NvDsInferParseCustomYoloV3( 35 | std::vector const& outputLayersInfo, 36 | NvDsInferNetworkInfo const& networkInfo, 37 | NvDsInferParseDetectionParams const& detectionParams, 38 | std::vector& objectList); 39 | 40 | extern "C" bool NvDsInferParseCustomYoloV3Tiny( 41 | std::vector const& outputLayersInfo, 42 | NvDsInferNetworkInfo const& networkInfo, 43 | NvDsInferParseDetectionParams const& detectionParams, 44 | std::vector& objectList); 45 | 46 | extern "C" bool NvDsInferParseCustomYoloV2( 47 | std::vector const& outputLayersInfo, 48 | NvDsInferNetworkInfo const& networkInfo, 49 | NvDsInferParseDetectionParams const& detectionParams, 50 | std::vector& objectList); 51 | 52 | extern "C" bool NvDsInferParseCustomYoloV2Tiny( 53 | std::vector const& outputLayersInfo, 54 | NvDsInferNetworkInfo const& networkInfo, 55 | NvDsInferParseDetectionParams const& detectionParams, 56 | std::vector& objectList); 57 | 58 | static unsigned clamp(int val, int minVal, int maxVal) 59 | { 60 | assert(minVal <= maxVal); 61 | return std::min(maxVal, std::max(minVal, val)); 62 | } 63 | 64 | /* This is a sample bounding box parsing function for the sample YoloV3 detector model */ 65 | static NvDsInferParseObjectInfo convertBBox(const float& bx, const float& by, const float& bw, 66 | const float& bh, const int& stride, const uint& netW, 67 | const uint& netH) 68 | { 69 | NvDsInferParseObjectInfo b; 70 | // Restore coordinates to network input resolution 71 | float x = bx * stride; 72 | float y = by * stride; 73 | 74 | b.left = clamp(x - bw / 2, 0, netW); 75 | b.width = clamp(bw, 0, netW - b.left); 76 | b.top = clamp(y - bh / 2, 0, netH); 77 | b.height = clamp(bh, 0, netH - b.top); 78 | 79 | return b; 80 | } 81 | 82 | static void addBBoxProposal(const float bx, const float by, const float bw, const float bh, 83 | const uint stride, const uint& netW, const uint& netH, const int maxIndex, 84 | const float maxProb, std::vector& binfo) 85 | { 86 | NvDsInferParseObjectInfo bbi = convertBBox(bx, by, bw, bh, stride, netW, netH); 87 | 88 | bbi.detectionConfidence = maxProb; 89 | bbi.classId = maxIndex; 90 | 91 | binfo.push_back(bbi); 92 | } 93 | 94 | static std::vector 95 | nonMaximumSuppression(const float nmsThresh, std::vector binfo) 96 | { 97 | auto overlap1D = [](float x1min, float x1max, float x2min, float x2max) -> float { 98 | if (x1min > x2min) 99 | { 100 | std::swap(x1min, x2min); 101 | std::swap(x1max, x2max); 102 | } 103 | return x1max < x2min ? 
0 : std::min(x1max, x2max) - x2min; 104 | }; 105 | auto computeIoU 106 | = [&overlap1D](NvDsInferParseObjectInfo& bbox1, NvDsInferParseObjectInfo& bbox2) -> float { 107 | float overlapX 108 | = overlap1D(bbox1.left, bbox1.left + bbox1.width, bbox2.left, bbox2.left + bbox2.width); 109 | float overlapY 110 | = overlap1D(bbox1.top, bbox1.top + bbox1.height, bbox2.top, bbox2.top + bbox2.height); 111 | float area1 = (bbox1.width) * (bbox1.height); 112 | float area2 = (bbox2.width) * (bbox2.height); 113 | float overlap2D = overlapX * overlapY; 114 | float u = area1 + area2 - overlap2D; 115 | return u == 0 ? 0 : overlap2D / u; 116 | }; 117 | 118 | std::stable_sort(binfo.begin(), binfo.end(), 119 | [](const NvDsInferParseObjectInfo& b1, const NvDsInferParseObjectInfo& b2) { 120 | return b1.detectionConfidence > b2.detectionConfidence; 121 | }); 122 | std::vector out; 123 | for (auto i : binfo) 124 | { 125 | bool keep = true; 126 | for (auto j : out) 127 | { 128 | if (keep) 129 | { 130 | float overlap = computeIoU(i, j); 131 | keep = overlap <= nmsThresh; 132 | } 133 | else 134 | break; 135 | } 136 | if (keep) out.push_back(i); 137 | } 138 | return out; 139 | } 140 | 141 | static std::vector 142 | nmsAllClasses(const float nmsThresh, 143 | std::vector& binfo, 144 | const uint numClasses) 145 | { 146 | std::vector result; 147 | std::vector> splitBoxes(numClasses); 148 | for (auto& box : binfo) 149 | { 150 | splitBoxes.at(box.classId).push_back(box); 151 | } 152 | 153 | for (auto& boxes : splitBoxes) 154 | { 155 | boxes = nonMaximumSuppression(nmsThresh, boxes); 156 | result.insert(result.end(), boxes.begin(), boxes.end()); 157 | } 158 | return result; 159 | } 160 | 161 | static float logistic(float x) 162 | { 163 | if (x > 0) { 164 | return 1 / (1 + exp(-x)); 165 | } else { 166 | float e = exp(x); 167 | return e / (1 + e); 168 | } 169 | } 170 | 171 | static std::vector 172 | decodeYoloV2Tensor( 173 | const float* detections, const std::vector &anchors, 174 | const uint gridSize, const uint stride, const uint numBBoxes, 175 | const uint numOutputClasses, const float probThresh, const uint& netW, 176 | const uint& netH) 177 | { 178 | std::vector binfo; 179 | for (uint y = 0; y < gridSize; ++y) 180 | { 181 | for (uint x = 0; x < gridSize; ++x) 182 | { 183 | for (uint b = 0; b < numBBoxes; ++b) 184 | { 185 | const float pw = anchors[b * 2]; 186 | const float ph = anchors[b * 2 + 1]; 187 | 188 | const int numGridCells = gridSize * gridSize; 189 | const int bbindex = y * gridSize + x; 190 | const float bx 191 | = x + logistic(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)]); 192 | const float by 193 | = y + logistic(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)]); 194 | const float bw 195 | = pw * exp (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)]); 196 | const float bh 197 | = ph * exp (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)]); 198 | 199 | const float objectness 200 | = logistic(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)]); 201 | 202 | float maxProb = 0.0f; 203 | int maxIndex = -1; 204 | 205 | for (uint i = 0; i < numOutputClasses; ++i) 206 | { 207 | float prob 208 | = exp(detections[bbindex 209 | + numGridCells * (b * (5 + numOutputClasses) + (5 + i))]); 210 | 211 | if (prob > maxProb) 212 | { 213 | maxProb = prob; 214 | maxIndex = i; 215 | } 216 | } 217 | 218 | float sum = 0; 219 | for (uint i = 0; i < numOutputClasses; ++i) { 220 | sum += exp(detections[bbindex + numGridCells * 
(b * (5 + numOutputClasses) + (5 + i))]); 221 | } 222 | 223 | maxProb = objectness * maxProb / sum; 224 | 225 | if (maxProb > probThresh) 226 | { 227 | 228 | addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo); 229 | } 230 | } 231 | } 232 | } 233 | return binfo; 234 | } 235 | 236 | 237 | static std::vector 238 | decodeYoloV3Tensor( 239 | const float* detections, const std::vector &mask, const std::vector &anchors, 240 | const uint gridSize, const uint stride, const uint numBBoxes, 241 | const uint numOutputClasses, const float probThresh, const uint& netW, 242 | const uint& netH) 243 | { 244 | std::vector binfo; 245 | for (uint y = 0; y < gridSize; ++y) 246 | { 247 | for (uint x = 0; x < gridSize; ++x) 248 | { 249 | for (uint b = 0; b < numBBoxes; ++b) 250 | { 251 | const float pw = anchors[mask[b] * 2]; 252 | const float ph = anchors[mask[b] * 2 + 1]; 253 | 254 | const int numGridCells = gridSize * gridSize; 255 | const int bbindex = y * gridSize + x; 256 | const float bx 257 | = x + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)]; 258 | const float by 259 | = y + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)]; 260 | const float bw 261 | = pw * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)]; 262 | const float bh 263 | = ph * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)]; 264 | 265 | const float objectness 266 | = detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)]; 267 | 268 | float maxProb = 0.0f; 269 | int maxIndex = -1; 270 | 271 | for (uint i = 0; i < numOutputClasses; ++i) 272 | { 273 | float prob 274 | = (detections[bbindex 275 | + numGridCells * (b * (5 + numOutputClasses) + (5 + i))]); 276 | 277 | if (prob > maxProb) 278 | { 279 | maxProb = prob; 280 | maxIndex = i; 281 | } 282 | } 283 | maxProb = objectness * maxProb; 284 | 285 | if (maxProb > probThresh) 286 | { 287 | addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo); 288 | } 289 | } 290 | } 291 | } 292 | return binfo; 293 | } 294 | 295 | static inline std::vector 296 | SortLayers(const std::vector & outputLayersInfo) 297 | { 298 | std::vector outLayers; 299 | for (auto const &layer : outputLayersInfo) { 300 | outLayers.push_back (&layer); 301 | } 302 | std::sort (outLayers.begin(), outLayers.end(), 303 | [](const NvDsInferLayerInfo *a, const NvDsInferLayerInfo *b){ 304 | return a->dims.d[1] < b->dims.d[1]; 305 | }); 306 | return outLayers; 307 | } 308 | 309 | static bool NvDsInferParseYoloV3( 310 | std::vector const& outputLayersInfo, 311 | NvDsInferNetworkInfo const& networkInfo, 312 | NvDsInferParseDetectionParams const& detectionParams, 313 | std::vector& objectList, 314 | const std::vector &anchors, 315 | const std::vector> &masks) 316 | { 317 | const uint kNUM_BBOXES = 3; 318 | static const float kNMS_THRESH = 0.3f; 319 | static const float kPROB_THRESH = 0.7f; 320 | 321 | const std::vector sortedLayers = 322 | SortLayers (outputLayersInfo); 323 | 324 | if (sortedLayers.size() != masks.size()) { 325 | std::cerr << "ERROR: yoloV3 output layer.size: " << sortedLayers.size() 326 | << " does not match mask.size: " << masks.size() << std::endl; 327 | return false; 328 | } 329 | 330 | /* if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured) 331 | { 332 | std::cerr << "WARNING: Num classes mismatch. 
Configured:" 333 | << detectionParams.numClassesConfigured 334 | << ", detected by network: " << NUM_CLASSES_YOLO << std::endl; 335 | }*/ 336 | 337 | std::vector objects; 338 | 339 | for (uint idx = 0; idx < masks.size(); ++idx) { 340 | const NvDsInferLayerInfo &layer = *sortedLayers[idx]; // 255 x Grid x Grid 341 | assert (layer.dims.numDims == 3); 342 | const uint gridSize = layer.dims.d[1]; 343 | const uint stride = networkInfo.width / gridSize; 344 | 345 | std::vector outObjs = 346 | decodeYoloV3Tensor((const float*)(layer.buffer), masks[idx], anchors, gridSize, stride, kNUM_BBOXES, 347 | detectionParams.numClassesConfigured, kPROB_THRESH, networkInfo.width, networkInfo.height); 348 | objects.insert(objects.end(), outObjs.begin(), outObjs.end()); 349 | } 350 | 351 | objectList.clear(); 352 | objectList = nmsAllClasses(kNMS_THRESH, objects, detectionParams.numClassesConfigured); 353 | 354 | return true; 355 | } 356 | 357 | 358 | /* C-linkage to prevent name-mangling */ 359 | extern "C" bool NvDsInferParseCustomYoloV3( 360 | std::vector const& outputLayersInfo, 361 | NvDsInferNetworkInfo const& networkInfo, 362 | NvDsInferParseDetectionParams const& detectionParams, 363 | std::vector& objectList) 364 | { 365 | static const std::vector kANCHORS = { 366 | 10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0, 367 | 45.0, 59.0, 119.0, 116.0, 90.0, 156.0, 198.0, 373.0, 326.0}; 368 | static const std::vector> kMASKS = { 369 | {6, 7, 8}, 370 | {3, 4, 5}, 371 | {0, 1, 2}}; 372 | return NvDsInferParseYoloV3 ( 373 | outputLayersInfo, networkInfo, detectionParams, objectList, 374 | kANCHORS, kMASKS); 375 | } 376 | 377 | extern "C" bool NvDsInferParseCustomYoloV3Tiny( 378 | std::vector const& outputLayersInfo, 379 | NvDsInferNetworkInfo const& networkInfo, 380 | NvDsInferParseDetectionParams const& detectionParams, 381 | std::vector& objectList) 382 | { 383 | static const std::vector kANCHORS = { 384 | 10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}; 385 | static const std::vector> kMASKS = { 386 | {3, 4, 5}, 387 | //{0, 1, 2}}; // as per output result, select {1,2,3} 388 | {1, 2, 3}}; 389 | 390 | return NvDsInferParseYoloV3 ( 391 | outputLayersInfo, networkInfo, detectionParams, objectList, 392 | kANCHORS, kMASKS); 393 | } 394 | 395 | static bool NvDsInferParseYoloV2( 396 | std::vector const& outputLayersInfo, 397 | NvDsInferNetworkInfo const& networkInfo, 398 | NvDsInferParseDetectionParams const& detectionParams, 399 | std::vector& objectList, 400 | const float nmsThreshold, const float probthreshold) 401 | { 402 | static const std::vector kANCHORS = { 403 | 18.3273602, 21.6763191, 59.9827194, 66.0009613, 404 | 106.829758, 175.178879, 252.250244, 112.888962, 405 | 312.656647, 293.384949 }; 406 | const uint kNUM_BBOXES = 5; 407 | 408 | if (outputLayersInfo.empty()) { 409 | std::cerr << "Could not find output layer in bbox parsing" << std::endl;; 410 | return false; 411 | } 412 | const NvDsInferLayerInfo &layer = outputLayersInfo[0]; 413 | 414 | /*if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured) 415 | { 416 | std::cerr << "WARNING: Num classes mismatch. 
Configured:" 417 | << detectionParams.numClassesConfigured 418 | << ", detected by network: " << NUM_CLASSES_YOLO << std::endl; 419 | }*/ 420 | 421 | assert (layer.dims.numDims == 3); 422 | const uint gridSize = layer.dims.d[1]; 423 | const uint stride = networkInfo.width / gridSize; 424 | std::vector objects = 425 | decodeYoloV2Tensor((const float*)(layer.buffer), kANCHORS, gridSize, stride, kNUM_BBOXES, 426 | detectionParams.numClassesConfigured, probthreshold, networkInfo.width, networkInfo.height); 427 | 428 | objectList.clear(); 429 | objectList = nmsAllClasses(nmsThreshold, objects, detectionParams.numClassesConfigured); 430 | 431 | // Uncomment to print outputs 432 | // if (objectList.empty()) 433 | // std::cout << "=========" << std::endl; 434 | // else 435 | // for (auto obj: objectList) { 436 | // std::cout << obj.classId << ", " << obj.detectionConfidence << ", left=" << obj.left << ", top=" << obj.top << ", width=" << obj.width << ", height=" << obj.height << std::endl; 437 | // } 438 | 439 | 440 | return true; 441 | } 442 | 443 | extern "C" bool NvDsInferParseCustomYoloV2( 444 | std::vector const& outputLayersInfo, 445 | NvDsInferNetworkInfo const& networkInfo, 446 | NvDsInferParseDetectionParams const& detectionParams, 447 | std::vector& objectList) 448 | { 449 | static const float kNMS_THRESH = 0.3f; 450 | static const float kPROB_THRESH = 0.6f; 451 | 452 | return NvDsInferParseYoloV2 ( 453 | outputLayersInfo, networkInfo, detectionParams, objectList, 454 | kNMS_THRESH, kPROB_THRESH); 455 | } 456 | 457 | extern "C" bool NvDsInferParseCustomYoloV2Tiny( 458 | std::vector const& outputLayersInfo, 459 | NvDsInferNetworkInfo const& networkInfo, 460 | NvDsInferParseDetectionParams const& detectionParams, 461 | std::vector& objectList) 462 | { 463 | static const float kNMS_THRESH = 0.2f; 464 | static const float kPROB_THRESH = 0.6f; 465 | return NvDsInferParseYoloV2 ( 466 | outputLayersInfo, networkInfo, detectionParams, objectList, 467 | kNMS_THRESH, kPROB_THRESH); 468 | } 469 | 470 | /* Check that the custom function has been defined correctly */ 471 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3); 472 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3Tiny); 473 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2); 474 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2Tiny); 475 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1.1_JetPack5.0.2/Makefile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 
13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | CUDA_VER?= 24 | ifeq ($(CUDA_VER),) 25 | $(error "CUDA_VER is not set") 26 | endif 27 | CC:= g++ 28 | NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc 29 | 30 | DEEPSTREAM_INCS_PATH:=/opt/nvidia/deepstream/deepstream/sources/includes 31 | NVDSINFER_CUSTOM_IMPL_PATH:=/opt/nvidia/deepstream/deepstream/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo 32 | 33 | CFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations 34 | CFLAGS+= -I../../includes -I/usr/local/cuda-$(CUDA_VER)/include -I $(DEEPSTREAM_INCS_PATH) 35 | 36 | LIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs 37 | LFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group 38 | 39 | INCS:= $(wildcard *.h) 40 | SRCFILES:= nvdsparsebbox_Yolo.cpp \ 41 | $(NVDSINFER_CUSTOM_IMPL_PATH)/trt_utils.cpp \ 42 | $(NVDSINFER_CUSTOM_IMPL_PATH)/kernels.cu 43 | TARGET_LIB:= libnvdsinfer_custom_impl_Yolo.so 44 | 45 | TARGET_OBJS:= $(SRCFILES:.cpp=.o) 46 | TARGET_OBJS:= $(TARGET_OBJS:.cu=.o) 47 | 48 | all: $(TARGET_LIB) 49 | 50 | %.o: %.cpp $(INCS) Makefile 51 | $(CC) -c -o $@ $(CFLAGS) $< 52 | 53 | %.o: %.cu $(INCS) Makefile 54 | $(NVCC) -c -o $@ --compiler-options '-fPIC' $< 55 | 56 | $(TARGET_LIB) : $(TARGET_OBJS) 57 | $(CC) -o $@ $(TARGET_OBJS) $(LFLAGS) 58 | 59 | clean: 60 | rm -rf $(TARGET_LIB) 61 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1.1_JetPack5.0.2/README.MD: -------------------------------------------------------------------------------- 1 | ## Instructions to build the CustomVision DeepStream YoloParser from source 2 | 1. Ensure that you have installed the DeepStream 6.1.1 sources included in the [DeepStream Installer](https://developer.nvidia.com/deepstream-getting-started) 3 | 2. Clean the build environment with: 4 | ``` 5 | make CUDA_VER=11.4 clean 6 | ``` 7 | 3. Build from source with: 8 | ``` 9 | make CUDA_VER=11.4 10 | ``` 11 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1.1_JetPack5.0.2/libnvdsinfer_custom_impl_Yolo.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1.1_JetPack5.0.2/libnvdsinfer_custom_impl_Yolo.so -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1.1_JetPack5.0.2/nvdsparsebbox_Yolo.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #include "nvdsinfer_custom_impl.h" 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | /*static const int NUM_CLASSES_YOLO = 2;*/ 33 | 34 | extern "C" bool NvDsInferParseCustomYoloV3( 35 | std::vector const& outputLayersInfo, 36 | NvDsInferNetworkInfo const& networkInfo, 37 | NvDsInferParseDetectionParams const& detectionParams, 38 | std::vector& objectList); 39 | 40 | extern "C" bool NvDsInferParseCustomYoloV3Tiny( 41 | std::vector const& outputLayersInfo, 42 | NvDsInferNetworkInfo const& networkInfo, 43 | NvDsInferParseDetectionParams const& detectionParams, 44 | std::vector& objectList); 45 | 46 | extern "C" bool NvDsInferParseCustomYoloV2( 47 | std::vector const& outputLayersInfo, 48 | NvDsInferNetworkInfo const& networkInfo, 49 | NvDsInferParseDetectionParams const& detectionParams, 50 | std::vector& objectList); 51 | 52 | extern "C" bool NvDsInferParseCustomYoloV2Tiny( 53 | std::vector const& outputLayersInfo, 54 | NvDsInferNetworkInfo const& networkInfo, 55 | NvDsInferParseDetectionParams const& detectionParams, 56 | std::vector& objectList); 57 | 58 | static unsigned clamp(int val, int minVal, int maxVal) 59 | { 60 | assert(minVal <= maxVal); 61 | return std::min(maxVal, std::max(minVal, val)); 62 | } 63 | 64 | /* This is a sample bounding box parsing function for the sample YoloV3 detector model */ 65 | static NvDsInferParseObjectInfo convertBBox(const float& bx, const float& by, const float& bw, 66 | const float& bh, const int& stride, const uint& netW, 67 | const uint& netH) 68 | { 69 | NvDsInferParseObjectInfo b; 70 | // Restore coordinates to network input resolution 71 | float x = bx * stride; 72 | float y = by * stride; 73 | 74 | b.left = clamp(x - bw / 2, 0, netW); 75 | b.width = clamp(bw, 0, netW - b.left); 76 | b.top = clamp(y - bh / 2, 0, netH); 77 | b.height = clamp(bh, 0, netH - b.top); 78 | 79 | return b; 80 | } 81 | 82 | static void addBBoxProposal(const float bx, const float by, const float bw, const float bh, 83 | const uint stride, const uint& netW, const uint& netH, const int maxIndex, 84 | const float maxProb, std::vector& binfo) 85 | { 86 | NvDsInferParseObjectInfo bbi = convertBBox(bx, by, bw, bh, stride, netW, netH); 87 | 88 | bbi.detectionConfidence = maxProb; 89 | bbi.classId = maxIndex; 90 | 91 | binfo.push_back(bbi); 92 | } 93 | 94 | static std::vector 95 | 
nonMaximumSuppression(const float nmsThresh, std::vector binfo) 96 | { 97 | auto overlap1D = [](float x1min, float x1max, float x2min, float x2max) -> float { 98 | if (x1min > x2min) 99 | { 100 | std::swap(x1min, x2min); 101 | std::swap(x1max, x2max); 102 | } 103 | return x1max < x2min ? 0 : std::min(x1max, x2max) - x2min; 104 | }; 105 | auto computeIoU 106 | = [&overlap1D](NvDsInferParseObjectInfo& bbox1, NvDsInferParseObjectInfo& bbox2) -> float { 107 | float overlapX 108 | = overlap1D(bbox1.left, bbox1.left + bbox1.width, bbox2.left, bbox2.left + bbox2.width); 109 | float overlapY 110 | = overlap1D(bbox1.top, bbox1.top + bbox1.height, bbox2.top, bbox2.top + bbox2.height); 111 | float area1 = (bbox1.width) * (bbox1.height); 112 | float area2 = (bbox2.width) * (bbox2.height); 113 | float overlap2D = overlapX * overlapY; 114 | float u = area1 + area2 - overlap2D; 115 | return u == 0 ? 0 : overlap2D / u; 116 | }; 117 | 118 | std::stable_sort(binfo.begin(), binfo.end(), 119 | [](const NvDsInferParseObjectInfo& b1, const NvDsInferParseObjectInfo& b2) { 120 | return b1.detectionConfidence > b2.detectionConfidence; 121 | }); 122 | std::vector out; 123 | for (auto i : binfo) 124 | { 125 | bool keep = true; 126 | for (auto j : out) 127 | { 128 | if (keep) 129 | { 130 | float overlap = computeIoU(i, j); 131 | keep = overlap <= nmsThresh; 132 | } 133 | else 134 | break; 135 | } 136 | if (keep) out.push_back(i); 137 | } 138 | return out; 139 | } 140 | 141 | static std::vector 142 | nmsAllClasses(const float nmsThresh, 143 | std::vector& binfo, 144 | const uint numClasses) 145 | { 146 | std::vector result; 147 | std::vector> splitBoxes(numClasses); 148 | for (auto& box : binfo) 149 | { 150 | splitBoxes.at(box.classId).push_back(box); 151 | } 152 | 153 | for (auto& boxes : splitBoxes) 154 | { 155 | boxes = nonMaximumSuppression(nmsThresh, boxes); 156 | result.insert(result.end(), boxes.begin(), boxes.end()); 157 | } 158 | return result; 159 | } 160 | 161 | static float logistic(float x) 162 | { 163 | if (x > 0) { 164 | return 1 / (1 + exp(-x)); 165 | } else { 166 | float e = exp(x); 167 | return e / (1 + e); 168 | } 169 | } 170 | 171 | static std::vector 172 | decodeYoloV2Tensor( 173 | const float* detections, const std::vector &anchors, 174 | const uint gridSize, const uint stride, const uint numBBoxes, 175 | const uint numOutputClasses, const float probThresh, const uint& netW, 176 | const uint& netH) 177 | { 178 | std::vector binfo; 179 | for (uint y = 0; y < gridSize; ++y) 180 | { 181 | for (uint x = 0; x < gridSize; ++x) 182 | { 183 | for (uint b = 0; b < numBBoxes; ++b) 184 | { 185 | const float pw = anchors[b * 2]; 186 | const float ph = anchors[b * 2 + 1]; 187 | 188 | const int numGridCells = gridSize * gridSize; 189 | const int bbindex = y * gridSize + x; 190 | const float bx 191 | = x + logistic(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)]); 192 | const float by 193 | = y + logistic(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)]); 194 | const float bw 195 | = pw * exp (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)]); 196 | const float bh 197 | = ph * exp (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)]); 198 | 199 | const float objectness 200 | = logistic(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)]); 201 | 202 | float maxProb = 0.0f; 203 | int maxIndex = -1; 204 | 205 | for (uint i = 0; i < numOutputClasses; ++i) 206 | { 207 | float prob 208 | = exp(detections[bbindex 209 | 
+ numGridCells * (b * (5 + numOutputClasses) + (5 + i))]); 210 | 211 | if (prob > maxProb) 212 | { 213 | maxProb = prob; 214 | maxIndex = i; 215 | } 216 | } 217 | 218 | float sum = 0; 219 | for (uint i = 0; i < numOutputClasses; ++i) { 220 | sum += exp(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + (5 + i))]); 221 | } 222 | 223 | maxProb = objectness * maxProb / sum; 224 | 225 | if (maxProb > probThresh) 226 | { 227 | 228 | addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo); 229 | } 230 | } 231 | } 232 | } 233 | return binfo; 234 | } 235 | 236 | 237 | static std::vector 238 | decodeYoloV3Tensor( 239 | const float* detections, const std::vector &mask, const std::vector &anchors, 240 | const uint gridSize, const uint stride, const uint numBBoxes, 241 | const uint numOutputClasses, const float probThresh, const uint& netW, 242 | const uint& netH) 243 | { 244 | std::vector binfo; 245 | for (uint y = 0; y < gridSize; ++y) 246 | { 247 | for (uint x = 0; x < gridSize; ++x) 248 | { 249 | for (uint b = 0; b < numBBoxes; ++b) 250 | { 251 | const float pw = anchors[mask[b] * 2]; 252 | const float ph = anchors[mask[b] * 2 + 1]; 253 | 254 | const int numGridCells = gridSize * gridSize; 255 | const int bbindex = y * gridSize + x; 256 | const float bx 257 | = x + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)]; 258 | const float by 259 | = y + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)]; 260 | const float bw 261 | = pw * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)]; 262 | const float bh 263 | = ph * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)]; 264 | 265 | const float objectness 266 | = detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)]; 267 | 268 | float maxProb = 0.0f; 269 | int maxIndex = -1; 270 | 271 | for (uint i = 0; i < numOutputClasses; ++i) 272 | { 273 | float prob 274 | = (detections[bbindex 275 | + numGridCells * (b * (5 + numOutputClasses) + (5 + i))]); 276 | 277 | if (prob > maxProb) 278 | { 279 | maxProb = prob; 280 | maxIndex = i; 281 | } 282 | } 283 | maxProb = objectness * maxProb; 284 | 285 | if (maxProb > probThresh) 286 | { 287 | addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo); 288 | } 289 | } 290 | } 291 | } 292 | return binfo; 293 | } 294 | 295 | static inline std::vector 296 | SortLayers(const std::vector & outputLayersInfo) 297 | { 298 | std::vector outLayers; 299 | for (auto const &layer : outputLayersInfo) { 300 | outLayers.push_back (&layer); 301 | } 302 | std::sort (outLayers.begin(), outLayers.end(), 303 | [](const NvDsInferLayerInfo *a, const NvDsInferLayerInfo *b){ 304 | return a->dims.d[1] < b->dims.d[1]; 305 | }); 306 | return outLayers; 307 | } 308 | 309 | static bool NvDsInferParseYoloV3( 310 | std::vector const& outputLayersInfo, 311 | NvDsInferNetworkInfo const& networkInfo, 312 | NvDsInferParseDetectionParams const& detectionParams, 313 | std::vector& objectList, 314 | const std::vector &anchors, 315 | const std::vector> &masks) 316 | { 317 | const uint kNUM_BBOXES = 3; 318 | static const float kNMS_THRESH = 0.3f; 319 | static const float kPROB_THRESH = 0.7f; 320 | 321 | const std::vector sortedLayers = 322 | SortLayers (outputLayersInfo); 323 | 324 | if (sortedLayers.size() != masks.size()) { 325 | std::cerr << "ERROR: yoloV3 output layer.size: " << sortedLayers.size() 326 | << " does not match mask.size: " << masks.size() << std::endl; 327 | return false; 328 | } 329 | 330 | 
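/* Decode flow: SortLayers (above) orders the output layers by ascending grid size, so each iteration of the loop below decodes one YOLO output scale. masks[idx] selects that scale's anchor subset, candidate boxes scoring above kPROB_THRESH are accumulated into `objects`, and a single class-wise NMS pass (nmsAllClasses) then produces the final objectList. */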
/* if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured) 331 | { 332 | std::cerr << "WARNING: Num classes mismatch. Configured:" 333 | << detectionParams.numClassesConfigured 334 | << ", detected by network: " << NUM_CLASSES_YOLO << std::endl; 335 | }*/ 336 | 337 | std::vector objects; 338 | 339 | for (uint idx = 0; idx < masks.size(); ++idx) { 340 | const NvDsInferLayerInfo &layer = *sortedLayers[idx]; // 255 x Grid x Grid 341 | assert (layer.dims.numDims == 3); 342 | const uint gridSize = layer.dims.d[1]; 343 | const uint stride = networkInfo.width / gridSize; 344 | 345 | std::vector outObjs = 346 | decodeYoloV3Tensor((const float*)(layer.buffer), masks[idx], anchors, gridSize, stride, kNUM_BBOXES, 347 | detectionParams.numClassesConfigured, kPROB_THRESH, networkInfo.width, networkInfo.height); 348 | objects.insert(objects.end(), outObjs.begin(), outObjs.end()); 349 | } 350 | 351 | objectList.clear(); 352 | objectList = nmsAllClasses(kNMS_THRESH, objects, detectionParams.numClassesConfigured); 353 | 354 | return true; 355 | } 356 | 357 | 358 | /* C-linkage to prevent name-mangling */ 359 | extern "C" bool NvDsInferParseCustomYoloV3( 360 | std::vector const& outputLayersInfo, 361 | NvDsInferNetworkInfo const& networkInfo, 362 | NvDsInferParseDetectionParams const& detectionParams, 363 | std::vector& objectList) 364 | { 365 | static const std::vector kANCHORS = { 366 | 10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0, 367 | 45.0, 59.0, 119.0, 116.0, 90.0, 156.0, 198.0, 373.0, 326.0}; 368 | static const std::vector> kMASKS = { 369 | {6, 7, 8}, 370 | {3, 4, 5}, 371 | {0, 1, 2}}; 372 | return NvDsInferParseYoloV3 ( 373 | outputLayersInfo, networkInfo, detectionParams, objectList, 374 | kANCHORS, kMASKS); 375 | } 376 | 377 | extern "C" bool NvDsInferParseCustomYoloV3Tiny( 378 | std::vector const& outputLayersInfo, 379 | NvDsInferNetworkInfo const& networkInfo, 380 | NvDsInferParseDetectionParams const& detectionParams, 381 | std::vector& objectList) 382 | { 383 | static const std::vector kANCHORS = { 384 | 10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}; 385 | static const std::vector> kMASKS = { 386 | {3, 4, 5}, 387 | //{0, 1, 2}}; // as per output result, select {1,2,3} 388 | {1, 2, 3}}; 389 | 390 | return NvDsInferParseYoloV3 ( 391 | outputLayersInfo, networkInfo, detectionParams, objectList, 392 | kANCHORS, kMASKS); 393 | } 394 | 395 | static bool NvDsInferParseYoloV2( 396 | std::vector const& outputLayersInfo, 397 | NvDsInferNetworkInfo const& networkInfo, 398 | NvDsInferParseDetectionParams const& detectionParams, 399 | std::vector& objectList, 400 | const float nmsThreshold, const float probthreshold) 401 | { 402 | static const std::vector kANCHORS = { 403 | 18.3273602, 21.6763191, 59.9827194, 66.0009613, 404 | 106.829758, 175.178879, 252.250244, 112.888962, 405 | 312.656647, 293.384949 }; 406 | const uint kNUM_BBOXES = 5; 407 | 408 | if (outputLayersInfo.empty()) { 409 | std::cerr << "Could not find output layer in bbox parsing" << std::endl;; 410 | return false; 411 | } 412 | const NvDsInferLayerInfo &layer = outputLayersInfo[0]; 413 | 414 | /*if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured) 415 | { 416 | std::cerr << "WARNING: Num classes mismatch. 
Configured:" 417 | << detectionParams.numClassesConfigured 418 | << ", detected by network: " << NUM_CLASSES_YOLO << std::endl; 419 | }*/ 420 | 421 | assert (layer.dims.numDims == 3); 422 | const uint gridSize = layer.dims.d[1]; 423 | const uint stride = networkInfo.width / gridSize; 424 | std::vector objects = 425 | decodeYoloV2Tensor((const float*)(layer.buffer), kANCHORS, gridSize, stride, kNUM_BBOXES, 426 | detectionParams.numClassesConfigured, probthreshold, networkInfo.width, networkInfo.height); 427 | 428 | objectList.clear(); 429 | objectList = nmsAllClasses(nmsThreshold, objects, detectionParams.numClassesConfigured); 430 | 431 | // Uncomment to print outputs 432 | // if (objectList.empty()) 433 | // std::cout << "=========" << std::endl; 434 | // else 435 | // for (auto obj: objectList) { 436 | // std::cout << obj.classId << ", " << obj.detectionConfidence << ", left=" << obj.left << ", top=" << obj.top << ", width=" << obj.width << ", height=" << obj.height << std::endl; 437 | // } 438 | 439 | 440 | return true; 441 | } 442 | 443 | extern "C" bool NvDsInferParseCustomYoloV2( 444 | std::vector const& outputLayersInfo, 445 | NvDsInferNetworkInfo const& networkInfo, 446 | NvDsInferParseDetectionParams const& detectionParams, 447 | std::vector& objectList) 448 | { 449 | static const float kNMS_THRESH = 0.3f; 450 | static const float kPROB_THRESH = 0.6f; 451 | 452 | return NvDsInferParseYoloV2 ( 453 | outputLayersInfo, networkInfo, detectionParams, objectList, 454 | kNMS_THRESH, kPROB_THRESH); 455 | } 456 | 457 | extern "C" bool NvDsInferParseCustomYoloV2Tiny( 458 | std::vector const& outputLayersInfo, 459 | NvDsInferNetworkInfo const& networkInfo, 460 | NvDsInferParseDetectionParams const& detectionParams, 461 | std::vector& objectList) 462 | { 463 | static const float kNMS_THRESH = 0.2f; 464 | static const float kPROB_THRESH = 0.6f; 465 | return NvDsInferParseYoloV2 ( 466 | outputLayersInfo, networkInfo, detectionParams, objectList, 467 | kNMS_THRESH, kPROB_THRESH); 468 | } 469 | 470 | /* Check that the custom function has been defined correctly */ 471 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3); 472 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3Tiny); 473 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2); 474 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2Tiny); 475 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1.1_JetPack5.0.2/nvdsparsebbox_Yolo.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1.1_JetPack5.0.2/nvdsparsebbox_Yolo.o -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1_JetPack5.0.1/Makefile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | CUDA_VER?= 24 | ifeq ($(CUDA_VER),) 25 | $(error "CUDA_VER is not set") 26 | endif 27 | CC:= g++ 28 | NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc 29 | 30 | DEEPSTREAM_INCS_PATH:=/opt/nvidia/deepstream/deepstream/sources/includes 31 | NVDSINFER_CUSTOM_IMPL_PATH:=/opt/nvidia/deepstream/deepstream/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo 32 | 33 | CFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations 34 | CFLAGS+= -I../../includes -I/usr/local/cuda-$(CUDA_VER)/include -I $(DEEPSTREAM_INCS_PATH) 35 | 36 | LIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs 37 | LFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group 38 | 39 | INCS:= $(wildcard *.h) 40 | SRCFILES:= nvdsparsebbox_Yolo.cpp \ 41 | $(NVDSINFER_CUSTOM_IMPL_PATH)/trt_utils.cpp \ 42 | $(NVDSINFER_CUSTOM_IMPL_PATH)/kernels.cu 43 | TARGET_LIB:= libnvdsinfer_custom_impl_Yolo.so 44 | 45 | TARGET_OBJS:= $(SRCFILES:.cpp=.o) 46 | TARGET_OBJS:= $(TARGET_OBJS:.cu=.o) 47 | 48 | all: $(TARGET_LIB) 49 | 50 | %.o: %.cpp $(INCS) Makefile 51 | $(CC) -c -o $@ $(CFLAGS) $< 52 | 53 | %.o: %.cu $(INCS) Makefile 54 | $(NVCC) -c -o $@ --compiler-options '-fPIC' $< 55 | 56 | $(TARGET_LIB) : $(TARGET_OBJS) 57 | $(CC) -o $@ $(TARGET_OBJS) $(LFLAGS) 58 | 59 | clean: 60 | rm -rf $(TARGET_LIB) 61 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1_JetPack5.0.1/README.MD: -------------------------------------------------------------------------------- 1 | ## Instructions to build the CustomVision DeepStream YoloParser from source 2 | 1. Ensure that you have installed the DeepStream 6.1 sources included in the [DeepStream Installer](https://developer.nvidia.com/deepstream-getting-started) 3 | 2. Clean the build environment with: 4 | ``` 5 | make CUDA_VER=11.4 clean 6 | ``` 7 | 3.
Build from source with: 8 | ``` 9 | make CUDA_VER=11.4 10 | ``` 11 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1_JetPack5.0.1/libnvdsinfer_custom_impl_Yolo.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1_JetPack5.0.1/libnvdsinfer_custom_impl_Yolo.so -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1_JetPack5.0.1/nvdsparsebbox_Yolo.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a 5 | * copy of this software and associated documentation files (the "Software"), 6 | * to deal in the Software without restriction, including without limitation 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | * and/or sell copies of the Software, and to permit persons to whom the 9 | * Software is furnished to do so, subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in 12 | * all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | * DEALINGS IN THE SOFTWARE.
21 | */ 22 | 23 | #include "nvdsinfer_custom_impl.h" 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | /*static const int NUM_CLASSES_YOLO = 2;*/ 33 | 34 | extern "C" bool NvDsInferParseCustomYoloV3( 35 | std::vector const& outputLayersInfo, 36 | NvDsInferNetworkInfo const& networkInfo, 37 | NvDsInferParseDetectionParams const& detectionParams, 38 | std::vector& objectList); 39 | 40 | extern "C" bool NvDsInferParseCustomYoloV3Tiny( 41 | std::vector const& outputLayersInfo, 42 | NvDsInferNetworkInfo const& networkInfo, 43 | NvDsInferParseDetectionParams const& detectionParams, 44 | std::vector& objectList); 45 | 46 | extern "C" bool NvDsInferParseCustomYoloV2( 47 | std::vector const& outputLayersInfo, 48 | NvDsInferNetworkInfo const& networkInfo, 49 | NvDsInferParseDetectionParams const& detectionParams, 50 | std::vector& objectList); 51 | 52 | extern "C" bool NvDsInferParseCustomYoloV2Tiny( 53 | std::vector const& outputLayersInfo, 54 | NvDsInferNetworkInfo const& networkInfo, 55 | NvDsInferParseDetectionParams const& detectionParams, 56 | std::vector& objectList); 57 | 58 | static unsigned clamp(int val, int minVal, int maxVal) 59 | { 60 | assert(minVal <= maxVal); 61 | return std::min(maxVal, std::max(minVal, val)); 62 | } 63 | 64 | /* This is a sample bounding box parsing function for the sample YoloV3 detector model */ 65 | static NvDsInferParseObjectInfo convertBBox(const float& bx, const float& by, const float& bw, 66 | const float& bh, const int& stride, const uint& netW, 67 | const uint& netH) 68 | { 69 | NvDsInferParseObjectInfo b; 70 | // Restore coordinates to network input resolution 71 | float x = bx * stride; 72 | float y = by * stride; 73 | 74 | b.left = clamp(x - bw / 2, 0, netW); 75 | b.width = clamp(bw, 0, netW - b.left); 76 | b.top = clamp(y - bh / 2, 0, netH); 77 | b.height = clamp(bh, 0, netH - b.top); 78 | 79 | return b; 80 | } 81 | 82 | static void addBBoxProposal(const float bx, const float by, const float bw, const float bh, 83 | const uint stride, const uint& netW, const uint& netH, const int maxIndex, 84 | const float maxProb, std::vector& binfo) 85 | { 86 | NvDsInferParseObjectInfo bbi = convertBBox(bx, by, bw, bh, stride, netW, netH); 87 | 88 | bbi.detectionConfidence = maxProb; 89 | bbi.classId = maxIndex; 90 | 91 | binfo.push_back(bbi); 92 | } 93 | 94 | static std::vector 95 | nonMaximumSuppression(const float nmsThresh, std::vector binfo) 96 | { 97 | auto overlap1D = [](float x1min, float x1max, float x2min, float x2max) -> float { 98 | if (x1min > x2min) 99 | { 100 | std::swap(x1min, x2min); 101 | std::swap(x1max, x2max); 102 | } 103 | return x1max < x2min ? 0 : std::min(x1max, x2max) - x2min; 104 | }; 105 | auto computeIoU 106 | = [&overlap1D](NvDsInferParseObjectInfo& bbox1, NvDsInferParseObjectInfo& bbox2) -> float { 107 | float overlapX 108 | = overlap1D(bbox1.left, bbox1.left + bbox1.width, bbox2.left, bbox2.left + bbox2.width); 109 | float overlapY 110 | = overlap1D(bbox1.top, bbox1.top + bbox1.height, bbox2.top, bbox2.top + bbox2.height); 111 | float area1 = (bbox1.width) * (bbox1.height); 112 | float area2 = (bbox2.width) * (bbox2.height); 113 | float overlap2D = overlapX * overlapY; 114 | float u = area1 + area2 - overlap2D; 115 | return u == 0 ? 
0 : overlap2D / u; 116 | }; 117 | 118 | std::stable_sort(binfo.begin(), binfo.end(), 119 | [](const NvDsInferParseObjectInfo& b1, const NvDsInferParseObjectInfo& b2) { 120 | return b1.detectionConfidence > b2.detectionConfidence; 121 | }); 122 | std::vector out; 123 | for (auto i : binfo) 124 | { 125 | bool keep = true; 126 | for (auto j : out) 127 | { 128 | if (keep) 129 | { 130 | float overlap = computeIoU(i, j); 131 | keep = overlap <= nmsThresh; 132 | } 133 | else 134 | break; 135 | } 136 | if (keep) out.push_back(i); 137 | } 138 | return out; 139 | } 140 | 141 | static std::vector 142 | nmsAllClasses(const float nmsThresh, 143 | std::vector& binfo, 144 | const uint numClasses) 145 | { 146 | std::vector result; 147 | std::vector> splitBoxes(numClasses); 148 | for (auto& box : binfo) 149 | { 150 | splitBoxes.at(box.classId).push_back(box); 151 | } 152 | 153 | for (auto& boxes : splitBoxes) 154 | { 155 | boxes = nonMaximumSuppression(nmsThresh, boxes); 156 | result.insert(result.end(), boxes.begin(), boxes.end()); 157 | } 158 | return result; 159 | } 160 | 161 | static float logistic(float x) 162 | { 163 | if (x > 0) { 164 | return 1 / (1 + exp(-x)); 165 | } else { 166 | float e = exp(x); 167 | return e / (1 + e); 168 | } 169 | } 170 | 171 | static std::vector 172 | decodeYoloV2Tensor( 173 | const float* detections, const std::vector &anchors, 174 | const uint gridSize, const uint stride, const uint numBBoxes, 175 | const uint numOutputClasses, const float probThresh, const uint& netW, 176 | const uint& netH) 177 | { 178 | std::vector binfo; 179 | for (uint y = 0; y < gridSize; ++y) 180 | { 181 | for (uint x = 0; x < gridSize; ++x) 182 | { 183 | for (uint b = 0; b < numBBoxes; ++b) 184 | { 185 | const float pw = anchors[b * 2]; 186 | const float ph = anchors[b * 2 + 1]; 187 | 188 | const int numGridCells = gridSize * gridSize; 189 | const int bbindex = y * gridSize + x; 190 | const float bx 191 | = x + logistic(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)]); 192 | const float by 193 | = y + logistic(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)]); 194 | const float bw 195 | = pw * exp (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)]); 196 | const float bh 197 | = ph * exp (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)]); 198 | 199 | const float objectness 200 | = logistic(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)]); 201 | 202 | float maxProb = 0.0f; 203 | int maxIndex = -1; 204 | 205 | for (uint i = 0; i < numOutputClasses; ++i) 206 | { 207 | float prob 208 | = exp(detections[bbindex 209 | + numGridCells * (b * (5 + numOutputClasses) + (5 + i))]); 210 | 211 | if (prob > maxProb) 212 | { 213 | maxProb = prob; 214 | maxIndex = i; 215 | } 216 | } 217 | 218 | float sum = 0; 219 | for (uint i = 0; i < numOutputClasses; ++i) { 220 | sum += exp(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + (5 + i))]); 221 | } 222 | 223 | maxProb = objectness * maxProb / sum; 224 | 225 | if (maxProb > probThresh) 226 | { 227 | 228 | addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo); 229 | } 230 | } 231 | } 232 | } 233 | return binfo; 234 | } 235 | 236 | 237 | static std::vector 238 | decodeYoloV3Tensor( 239 | const float* detections, const std::vector &mask, const std::vector &anchors, 240 | const uint gridSize, const uint stride, const uint numBBoxes, 241 | const uint numOutputClasses, const float probThresh, const uint& netW, 242 | const uint& 
netH) 243 | { 244 | std::vector binfo; 245 | for (uint y = 0; y < gridSize; ++y) 246 | { 247 | for (uint x = 0; x < gridSize; ++x) 248 | { 249 | for (uint b = 0; b < numBBoxes; ++b) 250 | { 251 | const float pw = anchors[mask[b] * 2]; 252 | const float ph = anchors[mask[b] * 2 + 1]; 253 | 254 | const int numGridCells = gridSize * gridSize; 255 | const int bbindex = y * gridSize + x; 256 | const float bx 257 | = x + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)]; 258 | const float by 259 | = y + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)]; 260 | const float bw 261 | = pw * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)]; 262 | const float bh 263 | = ph * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)]; 264 | 265 | const float objectness 266 | = detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)]; 267 | 268 | float maxProb = 0.0f; 269 | int maxIndex = -1; 270 | 271 | for (uint i = 0; i < numOutputClasses; ++i) 272 | { 273 | float prob 274 | = (detections[bbindex 275 | + numGridCells * (b * (5 + numOutputClasses) + (5 + i))]); 276 | 277 | if (prob > maxProb) 278 | { 279 | maxProb = prob; 280 | maxIndex = i; 281 | } 282 | } 283 | maxProb = objectness * maxProb; 284 | 285 | if (maxProb > probThresh) 286 | { 287 | addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo); 288 | } 289 | } 290 | } 291 | } 292 | return binfo; 293 | } 294 | 295 | static inline std::vector 296 | SortLayers(const std::vector & outputLayersInfo) 297 | { 298 | std::vector outLayers; 299 | for (auto const &layer : outputLayersInfo) { 300 | outLayers.push_back (&layer); 301 | } 302 | std::sort (outLayers.begin(), outLayers.end(), 303 | [](const NvDsInferLayerInfo *a, const NvDsInferLayerInfo *b){ 304 | return a->dims.d[1] < b->dims.d[1]; 305 | }); 306 | return outLayers; 307 | } 308 | 309 | static bool NvDsInferParseYoloV3( 310 | std::vector const& outputLayersInfo, 311 | NvDsInferNetworkInfo const& networkInfo, 312 | NvDsInferParseDetectionParams const& detectionParams, 313 | std::vector& objectList, 314 | const std::vector &anchors, 315 | const std::vector> &masks) 316 | { 317 | const uint kNUM_BBOXES = 3; 318 | static const float kNMS_THRESH = 0.3f; 319 | static const float kPROB_THRESH = 0.7f; 320 | 321 | const std::vector sortedLayers = 322 | SortLayers (outputLayersInfo); 323 | 324 | if (sortedLayers.size() != masks.size()) { 325 | std::cerr << "ERROR: yoloV3 output layer.size: " << sortedLayers.size() 326 | << " does not match mask.size: " << masks.size() << std::endl; 327 | return false; 328 | } 329 | 330 | /* if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured) 331 | { 332 | std::cerr << "WARNING: Num classes mismatch. 
Configured:" 333 | << detectionParams.numClassesConfigured 334 | << ", detected by network: " << NUM_CLASSES_YOLO << std::endl; 335 | }*/ 336 | 337 | std::vector objects; 338 | 339 | for (uint idx = 0; idx < masks.size(); ++idx) { 340 | const NvDsInferLayerInfo &layer = *sortedLayers[idx]; // 255 x Grid x Grid 341 | assert (layer.dims.numDims == 3); 342 | const uint gridSize = layer.dims.d[1]; 343 | const uint stride = networkInfo.width / gridSize; 344 | 345 | std::vector outObjs = 346 | decodeYoloV3Tensor((const float*)(layer.buffer), masks[idx], anchors, gridSize, stride, kNUM_BBOXES, 347 | detectionParams.numClassesConfigured, kPROB_THRESH, networkInfo.width, networkInfo.height); 348 | objects.insert(objects.end(), outObjs.begin(), outObjs.end()); 349 | } 350 | 351 | objectList.clear(); 352 | objectList = nmsAllClasses(kNMS_THRESH, objects, detectionParams.numClassesConfigured); 353 | 354 | return true; 355 | } 356 | 357 | 358 | /* C-linkage to prevent name-mangling */ 359 | extern "C" bool NvDsInferParseCustomYoloV3( 360 | std::vector const& outputLayersInfo, 361 | NvDsInferNetworkInfo const& networkInfo, 362 | NvDsInferParseDetectionParams const& detectionParams, 363 | std::vector& objectList) 364 | { 365 | static const std::vector kANCHORS = { 366 | 10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0, 367 | 45.0, 59.0, 119.0, 116.0, 90.0, 156.0, 198.0, 373.0, 326.0}; 368 | static const std::vector> kMASKS = { 369 | {6, 7, 8}, 370 | {3, 4, 5}, 371 | {0, 1, 2}}; 372 | return NvDsInferParseYoloV3 ( 373 | outputLayersInfo, networkInfo, detectionParams, objectList, 374 | kANCHORS, kMASKS); 375 | } 376 | 377 | extern "C" bool NvDsInferParseCustomYoloV3Tiny( 378 | std::vector const& outputLayersInfo, 379 | NvDsInferNetworkInfo const& networkInfo, 380 | NvDsInferParseDetectionParams const& detectionParams, 381 | std::vector& objectList) 382 | { 383 | static const std::vector kANCHORS = { 384 | 10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}; 385 | static const std::vector> kMASKS = { 386 | {3, 4, 5}, 387 | //{0, 1, 2}}; // as per output result, select {1,2,3} 388 | {1, 2, 3}}; 389 | 390 | return NvDsInferParseYoloV3 ( 391 | outputLayersInfo, networkInfo, detectionParams, objectList, 392 | kANCHORS, kMASKS); 393 | } 394 | 395 | static bool NvDsInferParseYoloV2( 396 | std::vector const& outputLayersInfo, 397 | NvDsInferNetworkInfo const& networkInfo, 398 | NvDsInferParseDetectionParams const& detectionParams, 399 | std::vector& objectList, 400 | const float nmsThreshold, const float probthreshold) 401 | { 402 | static const std::vector kANCHORS = { 403 | 18.3273602, 21.6763191, 59.9827194, 66.0009613, 404 | 106.829758, 175.178879, 252.250244, 112.888962, 405 | 312.656647, 293.384949 }; 406 | const uint kNUM_BBOXES = 5; 407 | 408 | if (outputLayersInfo.empty()) { 409 | std::cerr << "Could not find output layer in bbox parsing" << std::endl;; 410 | return false; 411 | } 412 | const NvDsInferLayerInfo &layer = outputLayersInfo[0]; 413 | 414 | /*if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured) 415 | { 416 | std::cerr << "WARNING: Num classes mismatch. 
Configured:" 417 | << detectionParams.numClassesConfigured 418 | << ", detected by network: " << NUM_CLASSES_YOLO << std::endl; 419 | }*/ 420 | 421 | assert (layer.dims.numDims == 3); 422 | const uint gridSize = layer.dims.d[1]; 423 | const uint stride = networkInfo.width / gridSize; 424 | std::vector<NvDsInferParseObjectInfo> objects = 425 | decodeYoloV2Tensor((const float*)(layer.buffer), kANCHORS, gridSize, stride, kNUM_BBOXES, 426 | detectionParams.numClassesConfigured, probThreshold, networkInfo.width, networkInfo.height); 427 | 428 | objectList.clear(); 429 | objectList = nmsAllClasses(nmsThreshold, objects, detectionParams.numClassesConfigured); 430 | 431 | // Uncomment to print outputs 432 | // if (objectList.empty()) 433 | // std::cout << "=========" << std::endl; 434 | // else 435 | // for (auto obj: objectList) { 436 | // std::cout << obj.classId << ", " << obj.detectionConfidence << ", left=" << obj.left << ", top=" << obj.top << ", width=" << obj.width << ", height=" << obj.height << std::endl; 437 | // } 438 | 439 | 440 | return true; 441 | } 442 | 443 | extern "C" bool NvDsInferParseCustomYoloV2( 444 | std::vector<NvDsInferLayerInfo> const& outputLayersInfo, 445 | NvDsInferNetworkInfo const& networkInfo, 446 | NvDsInferParseDetectionParams const& detectionParams, 447 | std::vector<NvDsInferParseObjectInfo>& objectList) 448 | { 449 | static const float kNMS_THRESH = 0.3f; 450 | static const float kPROB_THRESH = 0.6f; 451 | 452 | return NvDsInferParseYoloV2 ( 453 | outputLayersInfo, networkInfo, detectionParams, objectList, 454 | kNMS_THRESH, kPROB_THRESH); 455 | } 456 | 457 | extern "C" bool NvDsInferParseCustomYoloV2Tiny( 458 | std::vector<NvDsInferLayerInfo> const& outputLayersInfo, 459 | NvDsInferNetworkInfo const& networkInfo, 460 | NvDsInferParseDetectionParams const& detectionParams, 461 | std::vector<NvDsInferParseObjectInfo>& objectList) 462 | { 463 | static const float kNMS_THRESH = 0.2f; 464 | static const float kPROB_THRESH = 0.6f; 465 | return NvDsInferParseYoloV2 ( 466 | outputLayersInfo, networkInfo, detectionParams, objectList, 467 | kNMS_THRESH, kPROB_THRESH); 468 | } 469 | 470 | /* Check that the custom function has been defined correctly */ 471 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3); 472 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3Tiny); 473 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2); 474 | CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2Tiny); 475 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1_JetPack5.0.1/nvdsparsebbox_Yolo.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/DEEPSTREAM/YoloParser/CustomVision_DeepStream6.1_JetPack5.0.1/nvdsparsebbox_Yolo.o -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream5.0_JetPack4.4/README.MD: -------------------------------------------------------------------------------- 1 | ## Instructions to build the DeepStream YoloParser from source 2 | 1. Ensure that you have installed the DeepStream 5.0 sources included in the [DeepStream Installer](https://developer.nvidia.com/deepstream-getting-started) 3 | 2. Navigate to the following directory with: 4 | ``` 5 | cd /opt/nvidia/deepstream/deepstream-5.0/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo 6 | ``` 7 | 3. 
Clean the build environment with: 8 | ``` 9 | make CUDA_VER=10.2 clean 10 | ``` 11 | 4. Build from source with: 12 | ``` 13 | make CUDA_VER=10.2 14 | ``` 15 | 5. Copy the resulting shared object to this directory with: 16 | ``` 17 | cp /opt/nvidia/deepstream/deepstream-5.0/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so /data/misc/storage/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream5.0_JetPack4.4 18 | ``` -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream5.0_JetPack4.4/libnvdsinfer_custom_impl_Yolo.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream5.0_JetPack4.4/libnvdsinfer_custom_impl_Yolo.so -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream6.1.1_JetPack5.0.2/README.MD: -------------------------------------------------------------------------------- 1 | ## Instructions to build the DeepStream YoloParser from source 2 | 1. Ensure that you have installed the DeepStream 6.1.1 sources included in the [DeepStream Installer](https://developer.nvidia.com/deepstream-getting-started) 3 | 2. Navigate to the following directory with: 4 | ``` 5 | cd /opt/nvidia/deepstream/deepstream-6.1/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo 6 | ``` 7 | 3. Clean the build environment with: 8 | ``` 9 | make CUDA_VER=11.4 clean 10 | ``` 11 | 4. Build from source with: 12 | ``` 13 | make CUDA_VER=11.4 14 | ``` 15 | 5. Copy the resulting shared object to this directory with: 16 | ``` 17 | cp /opt/nvidia/deepstream/deepstream-6.1/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so /data/misc/storage/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream6.1.1_JetPack5.0.2 18 | ``` 19 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream6.1.1_JetPack5.0.2/libnvdsinfer_custom_impl_Yolo.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream6.1.1_JetPack5.0.2/libnvdsinfer_custom_impl_Yolo.so -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream6.1_JetPack5.0.1/README.MD: -------------------------------------------------------------------------------- 1 | ## Instructions to build the DeepStream YoloParser from source 2 | 1. Ensure that you have installed the DeepStream 6.1 sources included in the [DeepStream Installer](https://developer.nvidia.com/deepstream-getting-started) 3 | 2. Navigate to the following directory with: 4 | ``` 5 | cd /opt/nvidia/deepstream/deepstream-6.1/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo 6 | ``` 7 | 3. Clean the build environment with: 8 | ``` 9 | make CUDA_VER=11.4 clean 10 | ``` 11 | 4. Build from source with: 12 | ``` 13 | make CUDA_VER=11.4 14 | ``` 15 | 5. 
Copy the resulting shared object to this directory with: 16 | ``` 17 | cp /opt/nvidia/deepstream/deepstream-6.1/sources/objectDetector_Yolo/nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so /data/misc/storage/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream6.1_JetPack5.0.1 18 | ``` -------------------------------------------------------------------------------- /services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream6.1_JetPack5.0.1/libnvdsinfer_custom_impl_Yolo.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/DEEPSTREAM/YoloParser/CustomYolo_DeepStream6.1_JetPack5.0.1/libnvdsinfer_custom_impl_Yolo.so -------------------------------------------------------------------------------- /services/DEEPSTREAM/configs/DSConfig-CustomVisionAI.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 8 | 9 | [application] 10 | enable-perf-measurement=0 11 | perf-measurement-interval-sec=5 12 | #gie-kitti-output-dir=streamscl 13 | 14 | [tiled-display] 15 | enable=1 16 | rows=1 17 | columns=1 18 | width=1280 19 | height=640 20 | gpu-id=0 21 | #(0): nvbuf-mem-default - Default memory allocated, specific to particular platform 22 | #(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory, applicable for Tesla 23 | #(2): nvbuf-mem-cuda-device - Allocate Device cuda memory, applicable for Tesla 24 | #(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory, applicable for Tesla 25 | #(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson 26 | nvbuf-memory-type=0 27 | 28 | [source0] 29 | enable=1 30 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 5=CSI 31 | type=4 32 | uri=rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4 33 | rtsp-reconnect-interval-sec=60 34 | #latency=1000 35 | #type=5 36 | #camera-width=3280 37 | #camera-height=2464 38 | #camera-fps-n=20 39 | #camera-fps-d=1 40 | #camera-csi-sensor-id=0 41 | # (0): memtype_device - Memory type Device 42 | # (1): memtype_pinned - Memory type Host Pinned 43 | # (2): memtype_unified - Memory type Unified 44 | cudadec-memtype=0 45 | 46 | [sink0] 47 | enable=1 48 | #Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 49 | type=2 50 | sync=0 51 | source-id=0 52 | gpu-id=0 53 | qos=0 54 | nvbuf-memory-type=0 55 | overlay-id=1 56 | 57 | [sink1] 58 | enable=1 59 | #Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 6=MsgConvBroker 60 | type=6 61 | sync=0 62 | msg-conv-config=msgconv_config.txt 63 | #(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload 64 | #(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal 65 | #(256): PAYLOAD_RESERVED - Reserved type 66 | #(257): PAYLOAD_CUSTOM - Custom schema payload 67 | msg-conv-payload-type=1 68 | msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_azure_edge_proto.so 69 | topic=mytopic 
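# NOTE (added annotation, not part of the original NVIDIA config): sink1 hands
# each frame's detections to the Azure IoT Edge protocol adaptor named in
# msg-broker-proto-lib above, so they arrive in the IoT Edge runtime as JSON.
# With msg-conv-payload-type=1 (PAYLOAD_DEEPSTREAM_MINIMAL) the telemetry is a
# compact document roughly of the shape sketched below; all field values are
# illustrative, and each "objects" entry is a pipe-delimited string carrying a
# tracking id, the box geometry, and the class label (exact field order is
# assumed here from the DeepStream schema documentation):
#
#   {
#     "version" : "4.0",
#     "id" : 123,
#     "@timestamp" : "2020-05-01T19:31:21.000Z",
#     "sensorId" : "Camera_0",
#     "objects" : [ "18|409|93|547|301|person" ]
#   }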
70 | #Optional: 71 | #msg-broker-config=../../../../libs/azure_protocol_adaptor/module_client/cfg_azure.txt 72 | 73 | [sink2] 74 | enable=0 75 | type=1 76 | #1=mp4 2=mkv 77 | container=1 78 | #1=h264 2=h265 79 | codec=1 80 | sync=0 81 | bitrate=2000000 82 | output-file=out.mp4 83 | source-id=0 84 | 85 | [sink3] 86 | enable=1 87 | #Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming 88 | type=4 89 | #1=h264 2=h265 90 | codec=1 91 | sync=0 92 | bitrate=1500000 93 | # set below properties in case of RTSPStreaming 94 | rtsp-port=8554 95 | udp-port=5401 96 | 97 | [osd] 98 | enable=1 99 | gpu-id=0 100 | border-width=4 101 | text-size=15 102 | text-color=1;1;1;1; 103 | text-bg-color=0.3;0.3;0.3;1 104 | font=Arial 105 | show-clock=0 106 | clock-x-offset=800 107 | clock-y-offset=820 108 | clock-text-size=12 109 | clock-color=1;0;0;0 110 | nvbuf-memory-type=0 111 | 112 | [streammux] 113 | ##Boolean property to inform muxer that sources are live 114 | live-source=1 115 | batch-size=1 116 | ##time out in usec, to wait after the first buffer is available 117 | ##to push the batch even if the complete batch is not formed 118 | batched-push-timeout=40000 119 | ## Set muxer output width and height 120 | width=1280 121 | height=720 122 | ## If set to TRUE, system timestamp will be attached as ntp timestamp 123 | ## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached 124 | attach-sys-ts-as-ntp=1 125 | # config-file property is mandatory for any gie section. 126 | # Other properties are optional and if set will override the properties set in 127 | # the infer config file. 128 | 129 | [primary-gie] 130 | enable=1 131 | model-engine-file=../../CUSTOM_VISION_AI/model.onnx_b1_gpu0_fp32.engine 132 | #Required to display the PGIE labels, should be added even when using config-file 133 | #property 134 | batch-size=1 135 | #Required by the app for OSD, not a plugin property 136 | bbox-border-color0=1;0;0;1 137 | bbox-border-color1=0;1;1;1 138 | bbox-border-color2=0;0;1;1 139 | bbox-border-color3=0;1;0;1 140 | interval=0 141 | #Required by the app for SGIE, when used along with config-file property 142 | gie-unique-id=1 143 | config-file=config_infer_primary_CustomVisionAI.txt 144 | 145 | [tracker] 146 | enable=0 147 | tracker-width=480 148 | tracker-height=272 149 | ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_mot_klt.so 150 | #ll-config-file required for DCF/IOU only 151 | #ll-config-file=iou_config.txt 152 | gpu-id=0 153 | 154 | [tests] 155 | file-loop=1 156 | 157 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/configs/DSConfig-YoloV3.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 8 | 9 | #ENSURE that you have run the included downloadYoloWeights.sh prior to using this configuration! 
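# NOTE (added annotation): the download script lives at
# services/YOLOV3/downloadYoloWeights.sh in this repository; a typical
# invocation (illustrative) is:
#
#   cd services/YOLOV3 && bash downloadYoloWeights.sh
#
# which fetches yolov3.cfg / yolov3.weights (and the tiny variants) alongside
# labels.txt, so the relative model paths in config_infer_primary_yoloV3.txt
# resolve when the TensorRT engine is first built.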
10 | 11 | [application] 12 | enable-perf-measurement=0 13 | perf-measurement-interval-sec=5 14 | #gie-kitti-output-dir=streamscl 15 | 16 | [tiled-display] 17 | enable=1 18 | rows=1 19 | columns=1 20 | width=1280 21 | height=640 22 | gpu-id=0 23 | #(0): nvbuf-mem-default - Default memory allocated, specific to particular platform 24 | #(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory, applicable for Tesla 25 | #(2): nvbuf-mem-cuda-device - Allocate Device cuda memory, applicable for Tesla 26 | #(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory, applicable for Tesla 27 | #(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson 28 | nvbuf-memory-type=0 29 | 30 | [source0] 31 | enable=1 32 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 5=CSI 33 | type=4 34 | uri=rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4 35 | rtsp-reconnect-interval-sec=60 36 | #latency=1000 37 | #type=5 38 | #camera-width=3280 39 | #camera-height=2464 40 | #camera-fps-n=20 41 | #camera-fps-d=1 42 | #camera-csi-sensor-id=0 43 | # (0): memtype_device - Memory type Device 44 | # (1): memtype_pinned - Memory type Host Pinned 45 | # (2): memtype_unified - Memory type Unified 46 | cudadec-memtype=0 47 | 48 | [sink0] 49 | enable=1 50 | #Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 51 | type=2 52 | sync=0 53 | source-id=0 54 | gpu-id=0 55 | qos=0 56 | nvbuf-memory-type=0 57 | overlay-id=1 58 | 59 | [sink1] 60 | enable=1 61 | #Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 6=MsgConvBroker 62 | type=6 63 | sync=0 64 | msg-conv-config=msgconv_config.txt 65 | #(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload 66 | #(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal 67 | #(256): PAYLOAD_RESERVED - Reserved type 68 | #(257): PAYLOAD_CUSTOM - Custom schema payload 69 | msg-conv-payload-type=1 70 | msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_azure_edge_proto.so 71 | topic=mytopic 72 | #Optional: 73 | #msg-broker-config=../../../../libs/azure_protocol_adaptor/module_client/cfg_azure.txt 74 | 75 | [sink2] 76 | enable=0 77 | type=1 78 | #1=mp4 2=mkv 79 | container=1 80 | #1=h264 2=h265 81 | codec=1 82 | sync=0 83 | bitrate=2000000 84 | output-file=out.mp4 85 | source-id=0 86 | 87 | [sink3] 88 | enable=1 89 | #Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming 90 | type=4 91 | #1=h264 2=h265 92 | codec=1 93 | sync=0 94 | bitrate=3000000 95 | # set below properties in case of RTSPStreaming 96 | rtsp-port=8554 97 | udp-port=5401 98 | 99 | [osd] 100 | enable=1 101 | gpu-id=0 102 | border-width=4 103 | text-size=15 104 | text-color=1;1;1;1; 105 | text-bg-color=0.3;0.3;0.3;1 106 | font=Arial 107 | show-clock=0 108 | clock-x-offset=800 109 | clock-y-offset=820 110 | clock-text-size=12 111 | clock-color=1;0;0;0 112 | nvbuf-memory-type=0 113 | 114 | [streammux] 115 | ##Boolean property to inform muxer that sources are live 116 | live-source=1 117 | batch-size=1 118 | ##time out in usec, to wait after the first buffer is available 119 | ##to push the batch even if the complete batch is not formed 120 | batched-push-timeout=40000 121 | ## Set muxer output width and height 122 | width=1280 123 | height=720 124 | ## If set to TRUE, system timestamp will be attached as ntp timestamp 125 | ## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached 126 | attach-sys-ts-as-ntp=1 127 | 128 | # config-file property is mandatory for any gie section. 
129 | # Other properties are optional and if set will override the properties set in 130 | # the infer config file. 131 | [primary-gie] 132 | enable=1 133 | gpu-id=0 134 | nvbuf-memory-type=0 135 | batch-size=1 136 | #Required by the app for OSD, not a plugin property 137 | bbox-border-color0=1;1;1;1 138 | bbox-border-color1=0;0;1;0 139 | bbox-border-color2=0;0;1;1 140 | bbox-border-color3=0;1;0;0 141 | bbox-border-color7=0;1;0;1 142 | bbox-border-color8=0;1;1;0 143 | bbox-border-color15=0;1;1;1 144 | bbox-border-color16=1;0;0;0 145 | interval=1 146 | #Required by the app for SGIE, when used along with config-file property 147 | gie-unique-id=1 148 | config-file=config_infer_primary_yoloV3.txt 149 | 150 | [tracker] 151 | enable=0 152 | tracker-width=480 153 | tracker-height=272 154 | ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_mot_klt.so 155 | #ll-config-file required for DCF/IOU only 156 | #ll-config-file=iou_config.txt 157 | gpu-id=0 158 | 159 | [tests] 160 | file-loop=1 -------------------------------------------------------------------------------- /services/DEEPSTREAM/configs/DSConfig-YoloV3Tiny.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 8 | 9 | #ENSURE that you have run the included downloadYoloWeights.sh prior to using this configuration! 10 | 11 | [application] 12 | enable-perf-measurement=0 13 | perf-measurement-interval-sec=5 14 | #gie-kitti-output-dir=streamscl 15 | 16 | [tiled-display] 17 | enable=1 18 | rows=1 19 | columns=1 20 | width=1280 21 | height=640 22 | gpu-id=0 23 | #(0): nvbuf-mem-default - Default memory allocated, specific to particular platform 24 | #(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory, applicable for Tesla 25 | #(2): nvbuf-mem-cuda-device - Allocate Device cuda memory, applicable for Tesla 26 | #(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory, applicable for Tesla 27 | #(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson 28 | nvbuf-memory-type=0 29 | 30 | [source0] 31 | enable=1 32 | #Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP 5=CSI 33 | type=4 34 | uri=rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4 35 | rtsp-reconnect-interval-sec=60 36 | #latency=1000 37 | #type=5 38 | #camera-width=3280 39 | #camera-height=2464 40 | #camera-fps-n=20 41 | #camera-fps-d=1 42 | #camera-csi-sensor-id=0 43 | # (0): memtype_device - Memory type Device 44 | # (1): memtype_pinned - Memory type Host Pinned 45 | # (2): memtype_unified - Memory type Unified 46 | cudadec-memtype=0 47 | 48 | [sink0] 49 | enable=1 50 | #Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 51 | type=2 52 | sync=0 53 | source-id=0 54 | gpu-id=0 55 | qos=0 56 | nvbuf-memory-type=0 57 | overlay-id=1 58 | 59 | [sink1] 60 | enable=1 61 | #Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 6=MsgConvBroker 62 | type=6 63 | sync=0 64 | msg-conv-config=msgconv_config.txt 65 | #(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload 66 | #(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal 67 | #(256): 
PAYLOAD_RESERVED - Reserved type 68 | #(257): PAYLOAD_CUSTOM - Custom schema payload 69 | msg-conv-payload-type=1 70 | msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_azure_edge_proto.so 71 | topic=mytopic 72 | #Optional: 73 | #msg-broker-config=../../../../libs/azure_protocol_adaptor/module_client/cfg_azure.txt 74 | 75 | [sink2] 76 | enable=0 77 | type=1 78 | #1=mp4 2=mkv 79 | container=1 80 | #1=h264 2=h265 81 | codec=1 82 | sync=0 83 | bitrate=2000000 84 | output-file=out.mp4 85 | source-id=0 86 | 87 | [sink3] 88 | enable=1 89 | #Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming 90 | type=4 91 | #1=h264 2=h265 92 | codec=1 93 | sync=0 94 | bitrate=3000000 95 | # set below properties in case of RTSPStreaming 96 | rtsp-port=8554 97 | udp-port=5401 98 | 99 | [osd] 100 | enable=1 101 | gpu-id=0 102 | border-width=4 103 | text-size=15 104 | text-color=1;1;1;1; 105 | text-bg-color=0.3;0.3;0.3;1 106 | font=Arial 107 | show-clock=0 108 | clock-x-offset=800 109 | clock-y-offset=820 110 | clock-text-size=12 111 | clock-color=1;0;0;0 112 | nvbuf-memory-type=0 113 | 114 | [streammux] 115 | ##Boolean property to inform muxer that sources are live 116 | live-source=1 117 | batch-size=1 118 | ##time out in usec, to wait after the first buffer is available 119 | ##to push the batch even if the complete batch is not formed 120 | batched-push-timeout=40000 121 | ## Set muxer output width and height 122 | width=1280 123 | height=720 124 | ## If set to TRUE, system timestamp will be attached as ntp timestamp 125 | ## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached 126 | attach-sys-ts-as-ntp=1 127 | 128 | # config-file property is mandatory for any gie section. 129 | # Other properties are optional and if set will override the properties set in 130 | # the infer config file. 131 | [primary-gie] 132 | enable=1 133 | gpu-id=0 134 | nvbuf-memory-type=0 135 | batch-size=1 136 | #Required by the app for OSD, not a plugin property 137 | bbox-border-color0=1;1;1;1 138 | bbox-border-color1=0;0;1;0 139 | bbox-border-color2=0;0;1;1 140 | bbox-border-color3=0;1;0;0 141 | bbox-border-color7=0;1;0;1 142 | bbox-border-color8=0;1;1;0 143 | bbox-border-color15=0;1;1;1 144 | bbox-border-color16=1;0;0;0 145 | interval=0 146 | #Required by the app for SGIE, when used along with config-file property 147 | gie-unique-id=1 148 | config-file=config_infer_primary_yoloV3_tiny.txt 149 | 150 | [tracker] 151 | enable=0 152 | tracker-width=480 153 | tracker-height=272 154 | ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_mot_klt.so 155 | #ll-config-file required for DCF/IOU only 156 | #ll-config-file=iou_config.txt 157 | gpu-id=0 158 | 159 | [tests] 160 | file-loop=1 -------------------------------------------------------------------------------- /services/DEEPSTREAM/configs/config_infer_primary_CustomVisionAI.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 2 | # 3 | # NVIDIA Corporation and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA Corporation is strictly prohibited. 
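# NOTE (added annotation): this configuration wires the exported Custom Vision
# model (../../CUSTOM_VISION_AI/model.onnx) to the bundled custom parser
# library, whose NvDsInferParseCustomYoloV2Tiny entry point decodes the
# model's YOLOv2-tiny-style output head. See the active [property] section at
# the bottom of this file.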
8 | 9 | # Following properties are mandatory when engine files are not specified: 10 | # int8-calib-file(Only in INT8) 11 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 12 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 13 | # ONNX: onnx-file 14 | # 15 | # Mandatory properties for detectors: 16 | # num-detected-classes 17 | # 18 | # Optional properties for detectors: 19 | # enable-dbscan(Default=false), interval(Primary mode only, Default=0) 20 | # custom-lib-path, 21 | # parse-bbox-func-name 22 | # 23 | # Mandatory properties for classifiers: 24 | # classifier-threshold, is-classifier 25 | # 26 | # Optional properties for classifiers: 27 | # classifier-async-mode(Secondary mode only, Default=false) 28 | # 29 | # Optional properties in secondary mode: 30 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 31 | # input-object-min-width, input-object-min-height, input-object-max-width, 32 | # input-object-max-height 33 | # 34 | # Following properties are always recommended: 35 | # batch-size(Default=1) 36 | # 37 | # Other optional properties: 38 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 39 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 40 | # mean-file, gie-unique-id(Default=0), offsets, gie-mode (Default=1 i.e. primary), 41 | # custom-lib-path, network-mode(Default=0 i.e FP32) 42 | # 43 | # The values in the config file are overridden by values set through GObject 44 | # properties. 45 | 46 | # [property] 47 | # gpu-id=0 48 | # net-scale-factor=0.0039215697906911373 49 | # onnx-file=../custom_models/model.onnx 50 | # labelfile-path=../custom_models/labels.txt 51 | # batch-size=1 52 | # process-mode=1 53 | # model-color-format=0 54 | # ## 0=FP32, 1=INT8, 2=FP16 mode 55 | # network-mode=2 56 | # num-detected-classes=4 57 | # interval=0 58 | # gie-unique-id=1 59 | # output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid 60 | # #parse-bbox-func-name=NvDsInferParseCustomResnet 61 | # #custom-lib-path=/path/to/libnvdsparsebbox.so 62 | # #enable-dbscan=1 63 | 64 | [property] 65 | gpu-id=0 66 | net-scale-factor=1 67 | #0=RGB, 1=BGR 68 | model-color-format=1 69 | onnx-file=../../CUSTOM_VISION_AI/model.onnx 70 | labelfile-path=../../CUSTOM_VISION_AI/labels.txt 71 | ## 0=FP32, 1=INT8, 2=FP16 mode 72 | network-mode=0 73 | num-detected-classes=4 74 | gie-unique-id=1 75 | is-classifier=0 76 | maintain-aspect-ratio=1 77 | #output-blob-names=coverage;bbox 78 | parse-bbox-func-name=NvDsInferParseCustomYoloV2Tiny 79 | custom-lib-path=../YoloParser/CustomVision_DeepStream6.1.1_JetPack5.0.2/libnvdsinfer_custom_impl_Yolo.so 80 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/configs/config_infer_primary_yoloV3.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in 12 | # all copies or substantial portions of the Software. 13 | # 14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | # DEALINGS IN THE SOFTWARE. 21 | ################################################################################ 22 | 23 | # Following properties are mandatory when engine files are not specified: 24 | # int8-calib-file(Only in INT8), model-file-format 25 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 26 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 27 | # ONNX: onnx-file 28 | # 29 | # Mandatory properties for detectors: 30 | # num-detected-classes 31 | # 32 | # Optional properties for detectors: 33 | # cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) 34 | # custom-lib-path 35 | # parse-bbox-func-name 36 | # 37 | # Mandatory properties for classifiers: 38 | # classifier-threshold, is-classifier 39 | # 40 | # Optional properties for classifiers: 41 | # classifier-async-mode(Secondary mode only, Default=false) 42 | # 43 | # Optional properties in secondary mode: 44 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 45 | # input-object-min-width, input-object-min-height, input-object-max-width, 46 | # input-object-max-height 47 | # 48 | # Following properties are always recommended: 49 | # batch-size(Default=1) 50 | # 51 | # Other optional properties: 52 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 53 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 54 | # mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary), 55 | # custom-lib-path, network-mode(Default=0 i.e FP32) 56 | # 57 | # The values in the config file are overridden by values set through GObject 58 | # properties. 
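# NOTE (added annotation): net-scale-factor below is 1/255 = 0.0039215...;
# nvinfer preprocesses each input pixel roughly as y = net-scale-factor * (x - mean),
# and since no mean file is configured here (mean = 0), 0..255 byte values are
# mapped into the 0..1 range that Darknet-trained YOLO models expect.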
59 | 60 | [property] 61 | gpu-id=0 62 | net-scale-factor=0.0039215697906911373 63 | # 0=RGB, 1=BGR, 2=GRAYSCALE, according to the number of channels in yolo.cfg (1=GRAYSCALE, 3=RGB) 64 | model-color-format=0 65 | custom-network-config=../../YOLOV3/yolov3.cfg 66 | model-file=../../YOLOV3/yolov3.weights 67 | model-engine-file=../../YOLOV3/yolov3_model_b1_gpu0_fp16.engine 68 | labelfile-path=../../YOLOV3/labels.txt 69 | # Number of sources 70 | batch-size=1 71 | # 0=FP32, 1=INT8, 2=FP16 mode 72 | network-mode=2 73 | # Number of classes in your custom model 74 | num-detected-classes=80 75 | # Interval of detection (FPS increase if > 0) 76 | interval=1 77 | gie-unique-id=1 78 | # 1:Primary, 2:Secondary 79 | process-mode=1 80 | # 0:Detector, 1:Classifier, 2:Segmentation 81 | network-type=0 82 | is-classifier=0 83 | # 0=Group Rectangles, 1=DBSCAN, 2=NMS, 3= DBSCAN+NMS Hybrid, 4 = None(No clustering) 84 | cluster-mode=2 85 | maintain-aspect-ratio=1 86 | parse-bbox-func-name=NvDsInferParseCustomYoloV3 87 | custom-lib-path=../YoloParser/CustomYolo_DeepStream6.1.1_JetPack5.0.2/libnvdsinfer_custom_impl_Yolo.so 88 | engine-create-func-name=NvDsInferYoloCudaEngineGet 89 | 90 | [class-attrs-all] 91 | # Darknet nms 92 | nms-iou-threshold=0.35 93 | # Darknet conf_thresh 94 | pre-cluster-threshold=0.25 95 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/configs/config_infer_primary_yoloV3_tiny.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 3 | # 4 | # Permission is hereby granted, free of charge, to any person obtaining a 5 | # copy of this software and associated documentation files (the "Software"), 6 | # to deal in the Software without restriction, including without limitation 7 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 | # and/or sell copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following conditions: 10 | # 11 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 12 | # 13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 16 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | # DEALINGS IN THE SOFTWARE. 
20 | ################################################################################ 21 | 22 | # Following properties are mandatory when engine files are not specified: 23 | # int8-calib-file(Only in INT8), model-file-format 24 | # Caffemodel mandatory properties: model-file, proto-file, output-blob-names 25 | # UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names 26 | # ONNX: onnx-file 27 | # 28 | # Mandatory properties for detectors: 29 | # num-detected-classes 30 | # 31 | # Optional properties for detectors: 32 | # cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) 33 | # custom-lib-path 34 | # parse-bbox-func-name 35 | # 36 | # Mandatory properties for classifiers: 37 | # classifier-threshold, is-classifier 38 | # 39 | # Optional properties for classifiers: 40 | # classifier-async-mode(Secondary mode only, Default=false) 41 | # 42 | # Optional properties in secondary mode: 43 | # operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), 44 | # input-object-min-width, input-object-min-height, input-object-max-width, 45 | # input-object-max-height 46 | # 47 | # Following properties are always recommended: 48 | # batch-size(Default=1) 49 | # 50 | # Other optional properties: 51 | # net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), 52 | # model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, 53 | # mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary), 54 | # custom-lib-path, network-mode(Default=0 i.e FP32) 55 | # 56 | # The values in the config file are overridden by values set through GObject 57 | # properties. 58 | 59 | [property] 60 | gpu-id=0 61 | net-scale-factor=0.0039215697906911373 62 | # 0=RGB, 1=BGR, 2=GRAYSCALE, according to the number of channels in yolo.cfg (1=GRAYSCALE, 3=RGB) 63 | model-color-format=0 64 | custom-network-config=../../YOLOV3/yolov3-tiny.cfg 65 | model-file=../../YOLOV3/yolov3-tiny.weights 66 | model-engine-file=../../YOLOV3/yolov3_tiny_model_b1_gpu0_fp16.engine 67 | labelfile-path=../../YOLOV3/labels.txt 68 | # Number of sources 69 | batch-size=1 70 | # 0=FP32, 1=INT8, 2=FP16 mode 71 | network-mode=2 72 | # Number of classes in your custom model 73 | num-detected-classes=80 74 | # Interval of detection (FPS increase if > 0) 75 | interval=1 76 | gie-unique-id=1 77 | # 1:Primary, 2:Secondary 78 | process-mode=1 79 | # 0:Detector, 1:Classifier, 2:Segmentation 80 | network-type=0 81 | is-classifier=0 82 | # 0=Group Rectangles, 1=DBSCAN, 2=NMS, 3= DBSCAN+NMS Hybrid, 4 = None(No clustering) 83 | cluster-mode=2 84 | maintain-aspect-ratio=1 85 | parse-bbox-func-name=NvDsInferParseCustomYoloV3Tiny 86 | custom-lib-path=../YoloParser/CustomYolo_DeepStream6.1.1_JetPack5.0.2/libnvdsinfer_custom_impl_Yolo.so 87 | engine-create-func-name=NvDsInferYoloCudaEngineGet 88 | 89 | [class-attrs-all] 90 | # Darknet nms 91 | nms-iou-threshold=0.45 92 | # Darknet conf_thresh 93 | pre-cluster-threshold=0.25 94 | -------------------------------------------------------------------------------- /services/DEEPSTREAM/configs/msgconv_config.txt: -------------------------------------------------------------------------------- 1 | 2 | # Copyright (c) 2018 NVIDIA Corporation. All rights reserved. 3 | # 4 | # NVIDIA Corporation and its licensors retain all intellectual property 5 | # and proprietary rights in and to this software, related documentation 6 | # and any modifications thereto. 
Any use, reproduction, disclosure or 7 | # distribution of this software and related documentation without an express 8 | # license agreement from NVIDIA Corporation is strictly prohibited. 9 | 10 | [sensor0] 11 | enable=1 12 | type=Camera 13 | id=Camera_0 14 | location=45.293701447;-75.8303914499;48.1557479338 15 | description=Example Camera Entry 16 | coordinate=5.2;10.1;11.2 17 | -------------------------------------------------------------------------------- /services/POWER_BI/DeepStream+PowerBI.pbit: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toolboc/Intelligent-Video-Analytics-with-NVIDIA-Jetson-and-Microsoft-Azure/1bea7c4393aa92fb76bd360e1df421c95d8646f2/services/POWER_BI/DeepStream+PowerBI.pbit -------------------------------------------------------------------------------- /services/TIME_SERIES_INSIGHTS/Hierarchies/Locations.json: -------------------------------------------------------------------------------- 1 | { 2 | "put": [ 3 | { 4 | "id": "a37e38ca-0fd3-4ccf-a661-ec195a090ad7", 5 | "name": "Locations", 6 | "source": { 7 | "instanceFieldNames": [ 8 | "Address" 9 | ] 10 | } 11 | } 12 | ] 13 | } -------------------------------------------------------------------------------- /services/TIME_SERIES_INSIGHTS/Types/ObjectDetectionType.json: -------------------------------------------------------------------------------- 1 | { 2 | "put": [ 3 | { 4 | "id": "64bfceca-e1d8-415a-adc8-0b8228056048", 5 | "name": "ObjectDetectionType", 6 | "variables": { 7 | "Detections": { 8 | "kind": "categorical", 9 | "value": { 10 | "tsx": "$event.[object].String" 11 | }, 12 | "filter": { 13 | "tsx": "($event.[object].String) != null" 14 | }, 15 | "interpolation": null, 16 | "categories": [ 17 | { 18 | "label": "person", 19 | "values": [ 20 | "person" 21 | ], 22 | "annotations": {} 23 | }, 24 | { 25 | "label": "vehicle", 26 | "values": [ 27 | "car" 28 | ], 29 | "annotations": {} 30 | }, 31 | { 32 | "label": "bicycle", 33 | "values": [ 34 | "bicycle" 35 | ], 36 | "annotations": {} 37 | }, 38 | { 39 | "label": "roadsign", 40 | "values": [ 41 | "roadsign" 42 | ], 43 | "annotations": {} 44 | } 45 | ], 46 | "defaultCategory": { 47 | "label": "None", 48 | "annotations": {} 49 | } 50 | }, 51 | "Person": { 52 | "kind": "numeric", 53 | "value": { 54 | "tsx": "$event.[count].Double" 55 | }, 56 | "filter": { 57 | "tsx": "$event.[object].String='person'" 58 | }, 59 | "interpolation": null, 60 | "aggregation": { 61 | "tsx": "max($value)" 62 | } 63 | }, 64 | "Vehicle": { 65 | "kind": "numeric", 66 | "value": { 67 | "tsx": "$event.[count].Double" 68 | }, 69 | "filter": { 70 | "tsx": "$event.[object].String='car'" 71 | }, 72 | "interpolation": null, 73 | "aggregation": { 74 | "tsx": "max($value)" 75 | } 76 | } 77 | } 78 | } 79 | ] 80 | } -------------------------------------------------------------------------------- /services/YOLOV3/downloadYoloWeights.sh: -------------------------------------------------------------------------------- 1 | #YoloV3, 2 | echo "Downloading yolov3 config and weights files ... " 3 | wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg -q --show-progress 4 | wget https://pjreddie.com/media/files/yolov3.weights -q --show-progress 5 | 6 | #YoloV3Tiny, 7 | echo "Downloading yolov3-tiny config and weights files ... 
" 8 | wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3-tiny.cfg -q --show-progress 9 | wget https://pjreddie.com/media/files/yolov3-tiny.weights -q --show-progress -------------------------------------------------------------------------------- /services/YOLOV3/labels.txt: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic light 11 | fire hydrant 12 | stop sign 13 | parking meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard 39 | tennis racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | sofa 59 | pottedplant 60 | bed 61 | diningtable 62 | toilet 63 | tvmonitor 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear 79 | hair drier 80 | toothbrush --------------------------------------------------------------------------------