├── .devcontainer └── devcontainer.json ├── .gitignore ├── .vscode └── launch.json ├── GitVersion.yml ├── LICENSE ├── azure-pipelines.yml ├── code-of-conduct.md ├── deployment.template.json ├── documentation ├── contributing.md ├── customize-node-red.md ├── customize-sample-oee.md ├── customize-sample-other.md ├── dashboarding-sample.md ├── deployment-devops.md ├── deployment-manual.md ├── deployment-vscode.md ├── documentation-guide.md ├── flux-query-reference.md ├── manufacturing-kpis.md └── setup-edge-environment.md ├── media ├── OEEgauge.png ├── OfflineDashboards_diag.png ├── OfflineDashboards_diag0.png ├── OfflineDashboards_diag1.png ├── OfflineDashboards_diag2.png ├── availabilitygauge.png ├── availabilitygraph.png ├── dashboard-asset.png ├── dashboard-production.png ├── dashboard-sitelevel.png ├── dashboard.png ├── dataflow.png ├── edge-modules.png ├── edge-routes.png ├── edge-success.png ├── grafana-dash.png ├── idealrunrate.png ├── nodered_sim1.png ├── nodered_sim2.png ├── oeegraph.png ├── performancegauge.png ├── performancegraph.png ├── qualitygauge.png ├── qualitygraph.png ├── retrieve-connection-string.png ├── sitecombo.png ├── timeinterval.png ├── timeinterval2.png └── vscode-source-control.jpg ├── modules ├── edgetoinfluxdb │ ├── Dockerfile │ ├── flows.json │ ├── flows_cred.json │ ├── module.json │ └── settings.js ├── grafana │ ├── Dockerfile │ ├── azure-pipelines.yml │ ├── grafana-provisioning │ │ ├── dashboards │ │ │ ├── AssetBenchmarking.json │ │ │ ├── ProductionAtAGlance.json │ │ │ ├── SiteLevelPerformance.json │ │ │ └── dashboard.yml │ │ └── datasources │ │ │ └── datasource.yml │ └── module.json ├── influxdb │ ├── Dockerfile │ ├── azure-pipelines.yml │ ├── influxdb.conf │ ├── initdb.iql │ └── module.json ├── opcpublisher │ ├── Dockerfile │ ├── module.json │ └── publishedNodes.json └── opcsimulator │ ├── Dockerfile │ ├── flows.json │ ├── flows_cred.json │ ├── module.json │ └── settings.js └── readme.md /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/vscode-remote/devcontainer.json or this file's README at: 2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.117.1/containers/ubuntu-18.04-git 3 | { 4 | "name": "IoT Edge Dev", 5 | "image": "mcr.microsoft.com/iotedge/iotedgedev:latest", 6 | 7 | // Give the iotedgedev user access to docker 8 | "postCreateCommand": "sudo usermod -aG docker iotedgedev && sudo chown root:docker /var/run/docker.sock && sudo chown iotedgedev:iotedgedev /home/iotedgedev/.azure && sudo chown iotedgedev:iotedgedev /home/iotedgedev/.gitconfig", 9 | 10 | // Set *default* container specific settings.json values on container create. 11 | "settings": { 12 | "terminal.integrated.shell.linux": "/bin/bash" 13 | }, 14 | 15 | // Add the IDs of extensions you want installed when the container is created. 
16 | "extensions": [ 17 | "vsciot-vscode.azure-iot-tools", 18 | "ms-vscode-remote.remote-containers" 19 | ], 20 | 21 | "mounts": [ 22 | "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind", 23 | "source=${localEnv:HOME}${localEnv:USERPROFILE}/.azure,target=/home/iotedgedev/.azure,type=bind", 24 | ] 25 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | config/ 2 | .vscode/settings.json 3 | .env 4 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "SampleModule Remote Debug (.NET Core)", 6 | "type": "coreclr", 7 | "request": "attach", 8 | "processId": "${command:pickRemoteProcess}", 9 | "pipeTransport": { 10 | "pipeProgram": "docker", 11 | "pipeArgs": [ 12 | "exec", 13 | "-i", 14 | "SampleModule", 15 | "sh", 16 | "-c" 17 | ], 18 | "debuggerPath": "~/vsdbg/vsdbg", 19 | "pipeCwd": "${workspaceFolder}", 20 | "quoteArgs": true 21 | }, 22 | "sourceFileMap": { 23 | "/app": "${workspaceFolder}/modules/SampleModule" 24 | }, 25 | "justMyCode": true 26 | }, 27 | { 28 | "name": "SampleModule Local Debug (.NET Core)", 29 | "type": "coreclr", 30 | "request": "launch", 31 | "program": "${workspaceRoot}/modules/SampleModule/bin/Debug/netcoreapp3.1/SampleModule.dll", 32 | "args": [], 33 | "cwd": "${workspaceRoot}/modules/SampleModule", 34 | "internalConsoleOptions": "openOnSessionStart", 35 | "stopAtEntry": false, 36 | "console": "internalConsole", 37 | "env": { 38 | "EdgeHubConnectionString": "${config:azure-iot-edge.EdgeHubConnectionString}", 39 | "EdgeModuleCACertificateFile": "${config:azure-iot-edge.EdgeModuleCACertificateFile}" 40 | } 41 | } 42 | ] 43 | } -------------------------------------------------------------------------------- /GitVersion.yml: -------------------------------------------------------------------------------- 1 | mode: Mainline 2 | branches: {} 3 | ignore: 4 | sha: [] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | batch: true 3 | branches: 4 | include: 5 | - master 6 | paths: 7 | exclude: 8 | - readme.md 9 | - documentation/* 10 | - media/* 11 | pool: 12 | vmImage: ubuntu-18.04 13 | pr: 14 | autoCancel: true 15 | 16 | variables: 17 | CONTAINER_REGISTRY_ADDRESS: $(ACR_NAME).azurecr.io 18 | 19 | stages: 20 | - stage: Build 21 | jobs: 22 | - job: Build 23 | steps: 24 | 25 | # Determine module version 26 | - task: UseGitVersion@5 27 | inputs: 28 | versionSpec: '5.x.x' 29 | 30 | # Set container version to be Semantic Version 31 | - script: | 32 | echo "##vso[task.setvariable variable=CONTAINER_VERSION_TAG]$(GitVersion.AssemblySemVer)" 33 | displayName: Set container version to be Semantic Version 34 | 35 | # Build Modules 36 | - task: AzureIoTEdge@2 37 | inputs: 38 | action: 'Build module images' 39 | templateFilePath: 'deployment.template.json' 40 | defaultPlatform: 'amd64' 41 | displayName: Build Edge Modules 42 | 43 | # Push Modules 44 | - task: AzureIoTEdge@2 45 | inputs: 46 | action: 'Push module images' 47 | containerregistrytype: 'Azure Container Registry' 48 | azureSubscriptionEndpoint: $(AZURE_SERVICE_CONNECTION) 49 | azureContainerRegistry: '{"loginServer":"$(CONTAINER_REGISTRY_ADDRESS)", "id" : "/subscriptions/$(AZURE_SUBSCRIPTION_ID)/resourceGroups/$(ACR_RESOURCE_GROUP)/providers/Microsoft.ContainerRegistry/registries/$(ACR_NAME)"}' 50 | templateFilePath: 'deployment.template.json' 51 | defaultPlatform: 'amd64' 52 | fillRegistryCredential: 'true' 53 | displayName: Push Edge Modules 54 | 55 | # Copy the generated manifest to the artifact staging directory 56 | - script: | 57 | cp config/deployment.amd64.json $(Build.ArtifactStagingDirectory) 58 | displayName: Copy manifest to artifact directory 59 | 60 | # Publish the deployment manifest as a pipeline artifact 61 | - publish: $(Build.ArtifactStagingDirectory) 62 | artifact: config 63 | 64 | - stage: Release 65 | jobs: 66 | - job: Release 67 | steps: 68 | # Download deployment manifest from build stage 69 | - download: current 70 | artifact: config 71 | 72 | # Deploy to all edge devices 73 | - task: AzureIoTEdge@2 74 | displayName: Create Deployment 75 | inputs: 76 | action: Deploy to IoT Edge devices 77 | deploymentFilePath: $(Pipeline.Workspace)/config/deployment.amd64.json 78 | azureSubscription: $(AZURE_SERVICE_CONNECTION) 79 | iothubname: $(IOT_HUB_NAME) 80 | deploymentid: offline-dashboard 81 | priority: '10' 82 | deviceOption: Multiple Devices 83 | targetcondition: $(DEPLOYMENT_TARGET_CONDITION) 84 | -------------------------------------------------------------------------------- /code-of-conduct.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /deployment.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-template": "2.0.0", 3 | "modulesContent": { 4 | "$edgeAgent": { 5 | "properties.desired": { 6 | "schemaVersion": "1.0", 7 | "runtime": { 8 | "type": "docker", 9 | "settings": { 10 | "minDockerVersion": "v1.25", 11 | "loggingOptions": "", 12 | "registryCredentials": { 13 | "default": { 14 | "username": "${CONTAINER_REGISTRY_USERNAME}", 15 | "password": "${CONTAINER_REGISTRY_PASSWORD}", 16 | "address": "${CONTAINER_REGISTRY_ADDRESS}" 17 | } 18 | } 19 | } 20 | }, 21 | "systemModules": { 22 | "edgeAgent": { 23 | "type": "docker", 24 | "settings": { 25 | "image": "mcr.microsoft.com/azureiotedge-agent:1.2", 26 | "createOptions": {} 27 | } 28 | }, 29 | "edgeHub": { 30 | "type": "docker", 31 | "status": "running", 32 | "restartPolicy": "always", 33 | "settings": { 34 | "image": "mcr.microsoft.com/azureiotedge-hub:1.2", 35 | "createOptions": { 36 | "HostConfig": { 37 | "PortBindings": { 38 | "5671/tcp": [ 39 | { 40 | "HostPort": "5671" 41 | } 42 | ], 43 | "8883/tcp": [ 44 | { 45 | "HostPort": "8883" 46 | } 47 | ], 48 | "443/tcp": [ 49 | { 50 | "HostPort": "443" 51 | } 52 | ] 53 | } 54 | } 55 | } 56 | } 57 | } 58 | }, 59 | "modules": { 60 | "influxdb": { 61 | "type": "docker", 62 | "status": "running", 63 | "restartPolicy": "always", 64 | "settings": { 65 | "image": "${MODULES.influxdb}", 66 | "createOptions": { 67 | "HostConfig": { 68 | "Binds": [ 69 | "/influxdata:/var/lib/influxdb" 70 | ], 71 | "PortBindings": { 72 | "8086/tcp": [ 73 | { 74 | "HostPort": "8086" 75 | } 76 | ] 77 | } 78 | } 79 | } 80 | } 81 | }, 82 | "edgetoinfluxdb": { 83 | "type": "docker", 84 | "status": "running", 85 | "restartPolicy": "always", 86 | "settings": { 87 | "image": "${MODULES.edgetoinfluxdb}", 88 | "createOptions": { 89 | "HostConfig": { 90 | "PortBindings": { 91 | "1880/tcp": [ 92 | { 93 | "HostPort": "1881" 94 | } 95 | ] 96 | } 97 | } 98 | } 99 | } 100 | }, 101 | "grafana": { 102 | "type": "docker", 103 | "status": "running", 104 | "restartPolicy": "always", 105 | "settings": { 106 | "image": "${MODULES.grafana}", 107 | "createOptions": { 108 | "Env": [ 109 | "GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}" 110 | ], 111 | "Volumes": { 112 | "/etc/grafana/provisioning": {} 113 | }, 114 | "HostConfig": { 115 | "PortBindings": { 116 | "3000/tcp": [ 117 | { 118 | "HostPort": "3000" 119 | } 120 | ] 121 | } 122 | } 123 | } 124 | } 125 | }, 126 | "opcpublisher": { 127 | "type": "docker", 128 | "status": "running", 129 | "restartPolicy": "always", 130 | "settings": { 131 | "image": "${MODULES.opcpublisher}", 132 | "createOptions": { 133 | "Hostname": "publisher", 134 | "Cmd": [ 135 | "--pf=/app/pn.json", 136 | "--aa" 137 | ] 138 | } 139 | } 140 | }, 141 | "opcsimulator": { 142 | "type": "docker", 143 | "status": "running", 144 | "restartPolicy": "always", 145 | "settings": { 146 | "image": "${MODULES.opcsimulator}", 147 | "createOptions": { 148 | "HostConfig": { 149 | "PortBindings": { 150 | "1880/tcp": [ 151 | { 152 | "HostPort": "1880" 153 | } 154 | ] 155 | } 156 | } 157 | } 158 | } 159 | } 160 | } 161 | 
} 162 | }, 163 | "$edgeHub": { 164 | "properties.desired": { 165 | "schemaVersion": "1.0", 166 | "routes": { 167 | "cloud": "FROM /messages/* INTO $upstream", 168 | "opc": "FROM /messages/modules/opcpublisher/* INTO BrokeredEndpoint(\"/modules/edgetoinfluxdb/inputs/input1\")" 169 | }, 170 | "storeAndForwardConfiguration": { 171 | "timeToLiveSecs": 7200 172 | } 173 | } 174 | } 175 | } 176 | } -------------------------------------------------------------------------------- /documentation/contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | This project welcomes contributions from the community. All changes, be they small or large, need to adhere to the [documentation standards](documentation-guide.md), so please ensure you are familiar with these while developing to avoid delays when the change is being reviewed. 4 | 5 | **Table of contents** 6 | * [Submission process](#submission-process) 7 | * [Proposing feature requests](#proposing-feature-requests) 8 | * [Contribution process](#contribution-process) 9 | 10 | ## Submission process 11 | 12 | We provide several paths to enable developers to contribute, all starting with [creating a new issue](https://github.com/AzureIoTGBB/iot-edge-offline-dashboarding/issues/new). 13 | 14 | When filing an issue, please help us understand what your intent is: 15 | 16 | - **Bug report** - Functionality issue with one of the code components 17 | - **Documentation issue** - Issue with the documentation 18 | - **Feature request** - Proposal for a new feature 19 | 20 | ## Proposing feature requests 21 | 22 | When requesting a new feature, it is important to document the customer benefit / problem to be solved. Once submitted, the request will be reviewed and discussed on GitHub. We encourage open and constructive discussion of each feature proposal to ensure that the work is beneficial to a large segment of customers. 23 | 24 | To avoid needing to rework the feature, it is generally recommended that development of the feature does not begin during the review phase. Many times, the community review process uncovers one or more issues that may require significant changes in the proposed implementation. 25 | 26 | > [!NOTE] 27 | > If you wish to work on something that already exists on our backlog, you can use that work item as your proposal. Be sure to also comment on the task notifying maintainers that you're working towards completing it. 28 | 29 | ## Contribution process 30 | 31 | To get started, simply follow these steps: 32 | 33 | 1. Fork the repository. Click on the "Fork" button on the top right of the page and follow the flow. 34 | 1. Create a branch in your fork to make it easier to isolate any changes until ready for submission. 35 | 1. Implement the bug fix or feature. 36 | 1. Ensure the code and feature(s) are documented as described in the [Documentation Guidelines](documentation-guide.md). 37 | 1. Ensure the code works within the containers provided. 38 | 1. Create a pull request. 39 | 40 | If you are new to the Git workflow, [check out this introduction from GitHub](https://guides.github.com/activities/hello-world/). 41 | -------------------------------------------------------------------------------- /documentation/customize-node-red.md: -------------------------------------------------------------------------------- 1 | # Customize Node-RED Flows 2 | 3 | In order to customize the Node-RED flows, you will need to be able to access the Node-RED flow editor. 
There are two Node-RED flows in this project: "opcsimulator" and "edgetoinfluxdb." 4 | 5 | ## Access the Node-RED Flows 6 | To access the flow for the opcsimulator, go to http://edgeipaddress:1880/; for the edgetoinfluxdb flow, go to http://edgeipaddress:1881/. Don't forget to open ports 1880 and 1881 on your edge device in order to reach those Node-RED flows. Log in using the user name "reader" and the password "NRReader123". This will allow you to browse the flows in read-only mode (no flow deployment). If you want to be able to modify the flows, you will need to create a new admin password. Refer to the following sections for instructions. 7 | 8 | ## Generate New Admin Passwords 9 | The flows have a default admin password set. In order to view and modify the flows, you will need to generate a password hash and update the hashed password in the settings.js file of both modules. You will then need to redeploy the modules. More information about securing Node-RED can be found [here](https://nodered.org/docs/user-guide/runtime/securing-node-red). 10 | 11 | ### Steps to generate and update Node-RED admin passwords 12 | 1. SSH into your edge machine. 13 | 2. Get the container ID of the "edgetoinfluxdb" module: 14 | ```bash 15 | sudo docker ps 16 | ``` 17 | 3. Open a shell in the container, replacing {container id} with the ID from the previous step: 18 | ```bash 19 | sudo docker exec -it {container id} bash 20 | ``` 21 | 4. In the container shell, run the following to create a password hash, replacing {new password} with the admin password you want to use: 22 | ```bash 23 | node -e "console.log(require('bcryptjs').hashSync(process.argv[1], 8));" {new password} 24 | ``` 25 | 5. Copy the resulting hashed password for use in the next steps. 26 | 6. In your forked repo, locate the files modules/opcsimulator/settings.js and modules/edgetoinfluxdb/settings.js. 27 | 7. Locate the following section of each file and replace the password hash with the one you generated in step 4. 28 | ```javascript 29 | adminAuth: { 30 | type: "credentials", 31 | users: [{ 32 | username: "admin", 33 | password: "$2a$08$iiR32/SpJlZkZQ3MGEtd8OuC22n5qtvO/msabc123abc123abc123", 34 | permissions: "*" 35 | }, 36 | ``` 37 | 8. Commit your changes and build/redeploy your solution. 38 | -------------------------------------------------------------------------------- /documentation/customize-sample-oee.md: -------------------------------------------------------------------------------- 1 | 2 | # Customizing the IoT Offline Dashboarding sample 3 | 4 | The components in the solution are driven by configuration files, contained in and deployed via their corresponding Docker images. This allows customizing the dashboard by simply updating the corresponding dashboard configuration file and redeploying (possibly 'at scale') the respective images. 5 | 6 | > [!NOTE] 7 | > This page describes customization within the defined manufacturing scenario. If you are interested in adapting the sample for other industries or use cases, please see [Customizing the dashboard sample for other use cases](customize-sample-other.md). 8 | 9 | **Table of contents** 10 | * [Connecting assets / OPC servers](#connecting-assets--opc-servers) 11 | * [Adding a new asset (basic scenario)](#adding-a-new-asset-basic-scenario) 12 | * [Adding a new asset (complex scenario)](#adding-a-new-asset-complex-scenario) 13 | 14 | ## Connecting assets / OPC servers 15 | 16 | ### Removing simulators 17 | 18 | To remove the simulators, modify the `publishedNodes.json` file found in `modules\opcpublisher` and remove the two nodes shown below. Their data will stop flowing into the database. 
19 | 20 | ```json 21 | [ 22 | { 23 | "EndpointUrl": "opc.tcp://opcsimulator:54845/OPCUA/Site1", 24 | "UseSecurity": false, 25 | "OpcNodes": [ 26 | { 27 | "Id": "ns=1;s=STATUS", 28 | "OpcSamplingInterval": 1000, 29 | "OpcPublishingInterval": 5000, 30 | "DisplayName": "STATUS" 31 | }, 32 | { 33 | "Id": "ns=1;s=ITEM_COUNT_GOOD", 34 | "OpcSamplingInterval": 1000, 35 | "OpcPublishingInterval": 5000, 36 | "DisplayName": "ITEM_COUNT_GOOD" 37 | }, 38 | { 39 | "Id": "ns=1;s=ITEM_COUNT_BAD", 40 | "OpcSamplingInterval": 1000, 41 | "OpcPublishingInterval": 5000, 42 | "DisplayName": "ITEM_COUNT_BAD" 43 | } 44 | ] 45 | }, 46 | { 47 | "EndpointUrl": "opc.tcp://opcsimulator:54855/OPCUA/Site2", 48 | "UseSecurity": false, 49 | "OpcNodes": [ 50 | { 51 | "Id": "ns=1;s=STATUS", 52 | "OpcSamplingInterval": 1000, 53 | "OpcPublishingInterval": 5000, 54 | "DisplayName": "STATUS" 55 | }, 56 | { 57 | "Id": "ns=1;s=ITEM_COUNT_GOOD", 58 | "OpcSamplingInterval": 1000, 59 | "OpcPublishingInterval": 5000, 60 | "DisplayName": "ITEM_COUNT_GOOD" 61 | }, 62 | { 63 | "Id": "ns=1;s=ITEM_COUNT_BAD", 64 | "OpcSamplingInterval": 1000, 65 | "OpcPublishingInterval": 5000, 66 | "DisplayName": "ITEM_COUNT_BAD" 67 | } 68 | ] 69 | } 70 | ] 71 | ``` 72 | 73 | Afterwards, delete the previously added telemetry records from InfluxDB: 74 | 75 | ```sql 76 | DROP MEASUREMENT DeviceData 77 | ``` 78 | 79 | Note that the edge-to-influxdb flow automatically creates a measurement if it does not exist. 80 | 81 | Finally, remove the "opcsimulator" module from the deployment. 82 | 83 | ### Adding a new asset (basic scenario) 84 | 85 | The following steps assume an OPC server, installed and connected to real assets and equipment, that publishes three data points (`STATUS`, `ITEM_COUNT_GOOD`, `ITEM_COUNT_BAD`). 86 | 87 | #### Configuring the OPC UA server 88 | 89 | Configure the OPC UA server to publish the following data points with numeric data types: 90 | 91 | 1. STATUS (double) 92 | 1. ITEM_COUNT_GOOD (double) 93 | 1. ITEM_COUNT_BAD (double) 94 | 95 | Note down the "NodeId" values for all three data points, which are used in the `publishedNodes.json` configuration file. 96 | 97 | Configure the security aspects to make sure the solution has access. 98 | 99 | #### Adding nodes to the solution 100 | 101 | The `publishedNodes.json` configuration file contains the OPC UA nodes to be monitored by the OPC Publisher module. The file can be found in `modules\opcpublisher`. 
By default the configuration contains two simulators (see above) and three nodes for each simulator: 102 | 103 | ```json 104 | [ 105 | { 106 | "EndpointUrl": "opc.tcp://opcsimulator:54845/OPCUA/Site1", 107 | "UseSecurity": false, 108 | "OpcNodes": [ 109 | { 110 | "Id": "ns=1;s=STATUS", 111 | "OpcSamplingInterval": 1000, 112 | "OpcPublishingInterval": 5000, 113 | "DisplayName": "STATUS" 114 | }, 115 | { 116 | "Id": "ns=1;s=ITEM_COUNT_GOOD", 117 | "OpcSamplingInterval": 1000, 118 | "OpcPublishingInterval": 5000, 119 | "DisplayName": "ITEM_COUNT_GOOD" 120 | }, 121 | { 122 | "Id": "ns=1;s=ITEM_COUNT_BAD", 123 | "OpcSamplingInterval": 1000, 124 | "OpcPublishingInterval": 5000, 125 | "DisplayName": "ITEM_COUNT_BAD" 126 | } 127 | ] 128 | }, 129 | { 130 | "EndpointUrl": "opc.tcp://opcsimulator:54855/OPCUA/Site2", 131 | "UseSecurity": false, 132 | "OpcNodes": [ 133 | { 134 | "Id": "ns=1;s=STATUS", 135 | "OpcSamplingInterval": 1000, 136 | "OpcPublishingInterval": 5000, 137 | "DisplayName": "STATUS" 138 | }, 139 | { 140 | "Id": "ns=1;s=ITEM_COUNT_GOOD", 141 | "OpcSamplingInterval": 1000, 142 | "OpcPublishingInterval": 5000, 143 | "DisplayName": "ITEM_COUNT_GOOD" 144 | }, 145 | { 146 | "Id": "ns=1;s=ITEM_COUNT_BAD", 147 | "OpcSamplingInterval": 1000, 148 | "OpcPublishingInterval": 5000, 149 | "DisplayName": "ITEM_COUNT_BAD" 150 | } 151 | ] 152 | } 153 | ] 154 | ``` 155 | 156 | Add any new server node at the end of the file, along with the three data nodes (`STATUS`, `ITEM_COUNT_GOOD`, `ITEM_COUNT_BAD`). The new node should look similar to: 157 | 158 | ```json 159 | { 160 | "EndpointUrl": "opc.tcp://{server}:{port}/{path}", 161 | "UseSecurity": false, 162 | "OpcNodes": [ 163 | { 164 | "Id": "{STATUS node id}", 165 | "OpcSamplingInterval": 1000, 166 | "OpcPublishingInterval": 5000, 167 | "DisplayName": "STATUS" 168 | }, 169 | { 170 | "Id": "{ITEM_COUNT_GOOD node id}", 171 | "OpcSamplingInterval": 1000, 172 | "OpcPublishingInterval": 5000, 173 | "DisplayName": "ITEM_COUNT_GOOD" 174 | }, 175 | { 176 | "Id": "{ITEM_COUNT_BAD node id}", 177 | "OpcSamplingInterval": 1000, 178 | "OpcPublishingInterval": 5000, 179 | "DisplayName": "ITEM_COUNT_BAD" 180 | } 181 | ] 182 | } 183 | 184 | ``` 185 | 186 | Note that the above sample sets `UseSecurity: false`, which is not recommended in production environments. 187 | 188 | ### Adding the new asset to the Site Level Performance dashboard 189 | 190 | The following dashboard panels require the running status of an asset: 191 | 192 | 1. OEE Gauge 193 | 1. OEE History 194 | 1. Availability Gauge 195 | 1. Availability History 196 | 1. Performance Gauge 197 | 1. Performance History 198 | 199 | Each of these panels uses a mapping set, defined in the query below: if `STATUS` is 101, 105, or 108, the dashboard considers the asset to be running for the [KPI calculations](manufacturing-kpis.md). 200 | 201 | ```sql 202 | StatusValuesForOn = [101,105,108] 203 | ``` 204 | 205 | Modify these values in each panel's query to reflect `STATUS` values that indicate the `RUNNING` state of any asset. 206 | 207 | After modification, save the dashboard JSON file (`dashboard->settings->JSON Model`) under `modules\grafana\grafana-provisioning\dashboards` and rebuild the deployment. 208 | 209 | ### Adding a new asset (complex scenario) 210 | 211 | #### Configuring the OPC UA server 212 | 213 | Configure the OPC UA server to publish any desired data points. 214 | 215 | Note down the "NodeId" values for all data points, which are used in the `publishedNodes.json` configuration file. 216 | 217 | Configure the security aspects to make sure the solution has access. 
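For illustration, an entry for a server that requires authentication might look like the sketch below. Treat this as an assumption rather than a drop-in configuration: the exact authentication property names (such as `OpcAuthenticationMode`) vary across OPC Publisher versions, so check the documentation of the version you deploy:

```json
{
    "EndpointUrl": "opc.tcp://{server}:{port}/{path}",
    "UseSecurity": true,
    "OpcAuthenticationMode": "UsernamePassword",
    "OpcAuthenticationUsername": "{user name}",
    "OpcAuthenticationPassword": "{password}",
    "OpcNodes": [
        {
            "Id": "{node id}",
            "OpcSamplingInterval": 1000,
            "OpcPublishingInterval": 5000,
            "DisplayName": "{display name}"
        }
    ]
}
```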
218 | 219 | #### Adding nodes to the solution 220 | 221 | The `publishedNodes.json` configuration file contains the OPC UA nodes to be monitored by the OPC Publisher module. The file can be found in `modules\opcpublisher`. By default the configuration contains two simulators (see above) and three nodes for each simulator. 222 | 223 | Add new node definitions including any security settings required. Note that the sample below sets `UseSecurity: false`, which is not recommended in production environments. 224 | 225 | ```json 226 | [ 227 | { 228 | "EndpointUrl": "opc.tcp://{server}:{port}/{path}", 229 | "UseSecurity": false, 230 | "OpcNodes": [ 231 | { 232 | "Id": "{node id}", 233 | "OpcSamplingInterval": {sampling interval}, 234 | "OpcPublishingInterval": {publishing interval}, 235 | "DisplayName": "{display name}" 236 | }, 237 | . 238 | . 239 | ] 240 | }, 241 | . 242 | . 243 | ] 244 | 245 | ``` 246 | 247 | #### Modify how IoT messages are received 248 | 249 | The edge-to-influxdb flow receives IoT messages and formats them into an upsert command for InfluxDB. The implementation below handles differences between OPC Publisher versions. 250 | 251 | ```javascript 252 | //type checking 253 | var getType = function (elem) { 254 | return Object.prototype.toString.call(elem).slice(8, -1); 255 | }; 256 | 257 | function appendLeadingZeroes(n,digits){ 258 | var s=""; 259 | var start; 260 | if(n <= 9){ 261 | start=1; 262 | } 263 | else if(n > 9 && n<= 99){ 264 | start=2; 265 | } 266 | else if(n > 99){ 267 | start=3; 268 | } 269 | 270 | for (i=start;i<digits;i++){ 271 | s=s+"0"; 272 | } 273 | return s+n; 274 | } 275 | 276 | //build the InfluxDB payload for a single published node 277 | function processNode(rnode){ 278 | 279 | //make sure the ApplicationUri property exists; older OPC Publisher versions omit it, so derive it from the NodeId when missing 280 | if (rnode.ApplicationUri === undefined){ 281 | var tmpStr=rnode.NodeId.split(";"); 282 | . 283 | . 327 | if (tmpStr.length>2){rnode.ApplicationUri=tmpStr[0].substring(0,tmpStr[0].length-2);} 328 | else {rnode.ApplicationUri=tmpStr[0];} 329 | } 330 | 331 | //make sure timestamp property exists 332 | if (rnode.Timestamp === undefined){ 333 | rnode.Timestamp = new Date().toString(); 334 | } 335 | 336 | rnode.time = new Date(rnode.Timestamp).getTime()*1000000; 337 | 338 | var new_payload = 339 | { 340 | measurement: "DeviceData", 341 | fields: { 342 | //field added in next statement 343 | }, 344 | tags:{ 345 | Source: rnode.ApplicationUri, 346 | }, 347 | timestamp: rnode.time 348 | } 349 | ; 350 | 351 | new_payload.fields[rnode.DisplayName]=rnode.Value; 352 | return new_payload; 353 | } 354 | 355 | //main 356 | if (getType(msg.payload) === 'Array'){ 357 | for (index = 0; index < msg.payload.length; index++) { 358 | msg.payload[index] = processNode(msg.payload[index]); 359 | } 360 | } 361 | else 362 | { 363 | var newnode = processNode(msg.payload); 364 | msg.payload = new Array(newnode); 365 | } 366 | return msg; 367 | 368 | ``` 369 | 370 | The resulting JSON is sent to InfluxDB: 371 | 372 | ```json 373 | [ 374 | { 375 | "measurement": "DeviceData", 376 | "fields": { 377 | "ITEM_COUNT_BAD": 8, 378 | "ITEM_COUNT_GOOD": 1 379 | }, 380 | "tags": { 381 | "Source": "urn:edgevm3.internal.cloudapp.net:OPC-Site-02" 382 | }, 383 | "timestamp": 1591382648856000000 384 | } 385 | ] 386 | ``` 387 | 388 | Here are a few tips for creating JSON messages: 389 | 390 | * Use a JSON array as a root that may contain multiple tuples 391 | * `"measurement"` refers to the table name to insert data into different measurement buckets. 392 | * `"fields"` are actual metrics / telemetry / data points that are identified by timestamp and tag values 393 | * Use multiple field values in a single message 394 | * Alternatively, use separate messages for different fields. As long as the timestamp and tag values are the same, these are considered to be part of the same tuple. 395 | * `"tags"` are values that describe the metric, for example to identify or locate the asset. 
Use this flow to further contextualize the data by accessing other LOB systems and merging data into the same tuple as a tag. 396 | * Use the `timestamp` value from the source OPC server or create a separate value, according to your use case. 397 | 398 | #### Modifying dashboards 399 | 400 | When changing data fields, the dashboard and all panels need to be re-designed and the respective queries modified. See [Manufacturing KPIs](/documentation/manufacturing-kpis.md) for guidance on how the dashboards are built. 401 | 402 | 403 | #### Queries used in dashboards 404 | 405 | See [flux query reference](./flux-query-reference.md) for more information on queries used in dashboards. -------------------------------------------------------------------------------- /documentation/customize-sample-other.md: -------------------------------------------------------------------------------- 1 | # Customize the IoT Offline Dashboarding sample for other use cases 2 | 3 | The process for customizing or re-using the sample for other use cases will naturally depend on the details of the use case. However, some high level guidance can be found below. 4 | 5 | > [!NOTE] 6 | > This page describes the adaptation of the sample for other, non-manufacturing industries or use cases. If you are interested in just adding your own data sources to the sample, please see [Customizing the offline dashboarding solution](customize-sample-oee.md). 7 | 8 | **Table of contents** 9 | * [Understanding the current code and architecture](#understanding-the-current-code-and-architecture) 10 | * [Data sources](#data-sources) 11 | * [Moving data from edgeHub to InfluxDB](#moving-data-from-edgehub-to-influxdb) 12 | * [Understanding InfluxDB](#understanding-influxdb) 13 | * [Understanding Grafana](#understanding-grafana) 14 | 15 | ## Understanding the current code and architecture 16 | 17 | One key to customization of the solution is gaining a good understanding of how it works. Reading all the documentation is good, but like most samples "the truth is in the code". 18 | 19 | [Deploying the solution](deployment-manual.md) "as-is" is a great starting point for understanding its functionality. The deployment process will create Grafana dashboards to look at; however, it's important to understand the Node-RED flows for both the `opcsimulator` and, more importantly, the `edgetoinfluxdb` module. 20 | 21 | Using the same `az vm open-port` command as during the environment prep will also open port 1880 (opcsimulator) and port 1881 (edgetoinfluxdb). 22 | 23 | Those flows can then be viewed at `http://{vm ip address}:1880` and `http://{vm ip address}:1881`. 24 | 25 | > [!NOTE] 26 | > Since this is a sample, those flows are not secured with any kind of authentication. Only do this on a box with test data! 27 | 28 | ## Data sources 29 | 30 | The first step in customization will be shutting off the sample data sources. Remove the opcsimulator and opcpublisher modules from the solution by removing them from the [deployment.template.json](/deployment.template.json) file, and if desired, deleting the corresponding folders in /modules. 31 | 32 | After that, add your own data source(s). This can be done by using an IoT "leaf" device (i.e. a device external to IoT Edge) and "pushing" the data to IoT Edge (see [Connect a downstream device to an Azure IoT Edge gateway](https://docs.microsoft.com/en-us/azure/iot-edge/how-to-connect-downstream-device)). 
Alternatively, write a module that "pulls" the data from its source (like a [Modbus protocol gateway](https://docs.microsoft.com/en-us/azure/iot-edge/deploy-modbus-gateway) or OPC Publisher). 33 | 34 | The key requirement is that whichever source is used, it needs to submit the data to edgeHub through an [edgeHub route](https://docs.microsoft.com/en-us/azure/iot-edge/module-composition#declare-routes) to route the data into the 'edgetoinfluxdb' module. 35 | 36 | ## Moving data from edgeHub to InfluxDB 37 | 38 | The `edgetoinfluxdb` module, which is implemented as a Node-RED flow, subscribes to messages from the edgeHub, reformats them as an InfluxDB 'upsert' command, and submits them to the `influxdb` module. This is done in the `Build JSON` node in the flow. 39 | 40 | To use new flows in the sample, export each of them and overwrite the `flows.json` file. 41 | 42 | If there are additional Node-RED add-ins required, add those to the `npm install` commands in the Dockerfile. 43 | 44 | Here is a JSON message produced by the sample by default: 45 | 46 | ```json 47 | [ 48 | { 49 | "measurement": "DeviceData", 50 | "fields": { 51 | "ITEM_COUNT_BAD": 8, 52 | "ITEM_COUNT_GOOD": 1 53 | }, 54 | "tags": { 55 | "Source": "urn:edgevm3.internal.cloudapp.net:OPC-Site-02" 56 | }, 57 | "timestamp": 1591382648856000000 58 | } 59 | ] 60 | ``` 61 | 62 | Here are a few tips for creating JSON messages: 63 | 64 | * Use a JSON array as a root that may contain multiple tuples 65 | * `"measurement"` refers to the table name to insert data into different measurement buckets. 66 | * `"fields"` are actual metrics / telemetry / data points that are identified by timestamp and tag values 67 | * Use multiple field values in a single message 68 | * Alternatively, use separate messages for different fields. As long as the timestamp and tag values are the same, these are considered to be part of the same tuple. 69 | * `"tags"` are values that describe the metric, for example to identify or locate the asset. Use this flow to further contextualize the data by accessing other LOB systems and merging data into the same tuple as a tag. 70 | * Use the `iothub-enqueuedtime` [system property](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-messages-construct#system-properties-of-d2c-iot-hub-messages) of the message from edgeHub as the message `timestamp` or create a separate value, according to your use case. Please note that `timestamp` is a Unix Epoch timestamp and, in the case above, has a precision of nanoseconds. 71 | 72 | ## Understanding InfluxDB 73 | 74 | The InfluxDB module should be usable 'as-is'. One small change to consider is data retention time. For the sample, InfluxDB only retains data for one day. If the data needs to be retained longer, modify the `initdb.iql` file and rebuild / redeploy the container. 75 | 76 | When adding any additional data sources beyond InfluxDB, add them to the [datasource.yml](/modules/grafana/grafana-provisioning/datasources/datasource.yml) file. 77 | 78 | ## Understanding Grafana 79 | 80 | A tutorial on developing Grafana dashboards is beyond the scope of this documentation; however, the [Grafana documentation](https://grafana.com/docs/grafana/latest/) is a good place to start. A web search also provides lots of tutorials and examples. 81 | 82 | Note that our sample dashboards and InfluxDB use the `flux` plug-in and query language, which is great for working with time series data. 
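To give a flavor of the language, a minimal Flux query against the sample's `DeviceData` measurement might look like the sketch below. The bucket name is an assumption; use whatever database the InfluxDB module was initialized with in `initdb.iql`:

```sql
from(bucket: "telemetry")
  |> range(start: -1h)
  |> filter(fn: (r) => r._measurement == "DeviceData" and r._field == "ITEM_COUNT_GOOD")
  |> group(columns: ["Source"])
  |> aggregateWindow(every: 5m, fn: sum)
```

This sums the good item counts per asset (the `Source` tag) in five-minute windows over the last hour.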
83 | 84 | When developing new dashboard(s), put them in the [dashboards](/modules/grafana/grafana-provisioning/dashboards) folder. The built-in Grafana provisioning process will pick up all artifacts from there, as well as any additional data sources from the [datasource.yml](/modules/grafana/grafana-provisioning/datasources/datasource.yml) file. 85 | 86 | Rebuild and redeploy all the containers after any change via your chosen deployment method. 87 | 88 | #### Queries used in dashboards 89 | 90 | See [flux query reference](./flux-query-reference.md) for more information on queries used in dashboards. 91 | -------------------------------------------------------------------------------- /documentation/dashboarding-sample.md: -------------------------------------------------------------------------------- 1 | # IoT Offline Dashboarding sample 2 | 3 | As discussed in the [readme](/readme.md) for this project, this use case and architecture were chosen because a significant percentage of Microsoft customers showed interest in offline dashboarding solutions. 4 | 5 | Many of the requests came out of the manufacturing industry, which is why the example is based on a manufacturing scenario. In particular, the sample includes the necessary data collection, calculations, and visualization of the Overall Equipment Effectiveness (OEE) metric, common to manufacturers. For a deep dive of the metrics and calculations involved, please see [this document](manufacturing-kpis.md). 6 | 7 | More information on the deployment of the sample can be found [here](#deploying-the-sample). 8 | 9 | For guidance on how to customize the sample for other use cases, please see the [customization](#customizing-the-sample) section below. 10 | 11 | **Table of contents** 12 | - [IoT Offline Dashboarding sample](#iot-offline-dashboarding-sample) 13 | - [Business need](#business-need) 14 | - [Solution architecture](#solution-architecture) 15 | - [Understanding the sample data, calculations, and dashboard elements](#understanding-the-sample-data-calculations-and-dashboard-elements) 16 | - [Deploying the sample](#deploying-the-sample) 17 | - [View the Grafana dashboard](#view-the-grafana-dashboard) 18 | - [Customizing the sample](#customizing-the-sample) 19 | - [Known issues](#known-issues) 20 | 21 | ## Business need 22 | 23 | Smart Manufacturing provides new opportunities to reduce inefficiencies across labor, processes, machinery, materials and energy across the manufacturing lifecycle. 24 | 25 | [Azure Industrial IoT](https://azure.microsoft.com/en-us/overview/iot/) provides hybrid-cloud based components to build an end-to-end industrial IoT platform to enable innovation and to optimize operational processes. 26 | 27 | Most manufacturers start their journey by providing visibility across machines, processes, lines and factories through their unified industrial IoT platform. This is achieved by collecting data from manufacturing processes to provide end-to-end visibility. 28 | 29 | Different stakeholders will then make use of that platform to cater to their own needs, e.g. the planning department doing global planning or engineers monitoring and fine-tuning production phases. 30 | 31 | Operators and users that are responsible for monitoring of operations are at the top of the industrial IoT stakeholder list. They are usually responsible for the well-being of operations and processes and need to have access to information in real-time. 
On the other hand, we also know that means of communication (infrastructure) is less than perfect in many manufacturing facilities. Although the industrial IoT platform can provide real-time access, what happens if communication to the cloud is cut off? In terms of data reliability, Azure IoT Edge ensures data is accumulated when the connection to the cloud is broken and sent to the industrial IoT platform when cloud communication is restored. But how can users access real-time information in the meantime? 32 | 33 | There are two major concerns this sample implementation addresses: 34 | 35 | * Give local machine operators the ability to view telemetry and Key Performance Indicators (KPIs) during intermittent or offline internet connection scenarios. 36 | * View near real-time telemetry and KPIs without the latency of telemetry data traveling to the cloud first. 37 | 38 | ## Solution architecture 39 | 40 | The "Offline Dashboards" sample is built upon [Azure IoT Edge](https://azure.microsoft.com/en-us/services/iot-edge/) technology. IoT Edge is responsible for deploying and managing the lifecycle of a set of modules (described later) that make up the Offline Dashboards sample. 41 | 42 | Offline Dashboards runs on the IoT Edge device, continuously recording data that is sent from devices to IoT Hub. 43 | 44 | ![Diagram showing the offline dashboard architecture](../media/OfflineDashboards_diag1.png) 45 | 46 | The offline dashboarding sample contains 5 modules: 47 | 48 | 1. A Node-RED module that runs an OPC-UA simulator, which emulates sending data from two "sites" 49 | 2. The [OPC-UA Publisher](https://github.com/Azure/iot-edge-opc-publisher) module provided by Microsoft's Industrial IoT team, which reads OPC-UA data from the simulator and writes it to IoT Edge (via edgeHub) 50 | 3. A Node-RED module that collects data from OPC Publisher (via edgeHub) and writes that data into InfluxDB. 51 | 4. An InfluxDB module which stores data in a time-series structure 52 | 5. A Grafana module which serves data from InfluxDB in dashboards. 53 | 54 | ![Diagram showing the Azure IoT Edge solution architecture](/media/OfflineDashboards_diag2.png) 55 | 56 | ## Understanding the sample data, calculations, and dashboard elements 57 | 58 | The sample dashboard provides meaningful calculations of the Overall Equipment Effectiveness (OEE) metric common to manufacturers. There is [documentation on how these KPIs are defined and calculated](manufacturing-kpis.md). 59 | 60 | ## Deploying the sample 61 | 62 | The first step in running the sample is to have a functioning, Linux-based IoT Edge instance (Windows support coming). You can set one up by following the instructions [here](setup-edge-environment.md). 63 | 64 | Once you have a functioning IoT Edge environment, the sample provides several options for deployment, in order of increasing complexity and increasing recommendation (more repeatable and less error-prone): 65 | 66 | * [Manual](deployment-manual.md) - For manual deployment instructions, leveraging the Docker command line and the Azure Portal 67 | * [Visual Studio Code](deployment-vscode.md) - For building and deploying to a single IoT Edge device via VS Code 68 | * [Azure DevOps](deployment-devops.md) - For integrating the build and deployment process into an Azure DevOps pipeline 69 | 70 | ### View the Grafana dashboard 71 | 72 | Verify that the IoT Edge modules are indeed running by viewing the running Grafana dashboard. 
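Before opening the dashboard, you can also confirm from the Edge device itself that the modules are up:

```bash
sudo iotedge list
```

All five sample modules, plus edgeAgent and edgeHub, should show a runtime status of "running".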
To view the dashboard, replace the {ip-address} in the following link with your own VM IP address and open the URL in a web browser: 73 | 74 | ```http 75 | http://{ip-address}:3000/ 76 | ``` 77 | 78 | Log in to Grafana using "admin" as the user name and the password specified in the "GF_SECURITY_ADMIN_PASSWORD" environment variable (in the grafana module options). 79 | 80 | > [!NOTE] 81 | > There is currently a bug: the data source details are deployed correctly but not shown as 'enabled'. This causes the dashboard to display an error and show no data. 82 | > 83 | > When logged in, click the gear icon on the left-hand panel and select data sources -> "myinfluxdb" to navigate into the settings. Click the "Save & Test" button at the bottom. Grafana should now show "Data source connected and database found." 84 | 85 | When the data sources are active, hover over the dashboard icon in the left side panel and click "Manage." There should be several OEE-related dashboards under the General folder. Click on the "Site Level Performance" dashboard to get started. The resulting dashboard should look like below: 86 | 87 | ![Grafana Dashboard](/media/grafana-dash.png) 88 | 89 | > [!NOTE] 90 | > It may take upwards of 10 minutes for all graphs to show correctly since they rely on a history of data. 91 | 92 | Feel free to explore the other dashboards available. 93 | 94 | ## Customizing the sample 95 | 96 | If your use case is manufacturing / OEE and your goal is to change the data sources, this [document](customize-sample-oee.md) discusses the process and options. 97 | 98 | If your use case is something entirely different, this [document](customize-sample-other.md) gives a high level overview of the process involved. 99 | 100 | ## Known issues 101 | 102 | There are a few known issues with the sample that we are aware of: 103 | 104 | * When deploying Grafana, the pre-configured datasource (myinfluxdb) is properly configured; however, it isn't shown as 'active'. The manual process above configures and activates it. 105 | * The deployment of Grafana doesn't currently work with backing host-based storage. This means that any changes made to Grafana (users, dashboards, etc) are lost if the container is removed or replaced. 106 | -------------------------------------------------------------------------------- /documentation/deployment-devops.md: -------------------------------------------------------------------------------- 1 | # Deploy the IoT Offline Dashboarding sample via Azure DevOps pipelines 2 | 3 | This document describes how to set up an Azure DevOps pipeline to deploy the IoT Offline Dashboarding sample. Choose this option to ensure a continuous, repeatable development, build, and deployment process, as well as the ability to test the deployment to multiple IoT Edge devices at scale. 4 | 5 | **Table of contents** 6 | * [Preparation of Edge boxes and IoT Hub](#preparation-of-edge-boxes-and-iot-hub) 7 | * [Forking the repository](#forking-the-repository) 8 | * [Setting up an Azure DevOps organization and project](#setting-up-an-azure-devops-organization-and-project) 9 | * [Creating the DevOps pipeline](#creating-the-devops-pipeline) 10 | * [Executing the pipeline](#executing-the-pipeline) 11 | * [Verify successful deployment](#verify-successful-deployment) 12 | * [See also](#see-also) 13 | 14 | ## Preparation of Edge boxes and IoT Hub 15 | 16 | The DevOps pipeline is going to target multiple Edge boxes. 
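The portal steps for tagging each target device are described below. As a sketch, the same tag can also be set from the Azure CLI, assuming the `azure-iot` extension is installed (flag names may vary by extension version):

```bash
az iot hub device-twin update --hub-name {iot hub name} \
    --device-id {edge device id} --tags '{"dashboard": true}'
```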
17 | 18 | **Please start by setting up your Edge devices by going through the [edge environment setup](setup-edge-environment.md) document.** 19 | 20 | These devices will be targeted via a `tag` in each Edge device's [device twin](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-device-twins#device-twins). The DevOps pipeline creates an IoT Hub [automatic deployment](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-automatic-device-management) that targets any Edge devices that have the specified `tag` in their device twin. 21 | 22 | To create such a tag, navigate to the IoT Hub and choose "IoT Edge" in the navigation. Click on the respective IoT Edge device and select "Device Twin" on the top left of the blade. Create a `tag` in the device twin for your Edge device, for example: 23 | 24 | ```json 25 | { 26 | (((rest of device twin removed for brevity))) 27 | 28 | "tags": 29 | {"dashboard":true}, 30 | 31 | (((rest of device twin removed for brevity))) 32 | } 33 | ``` 34 | 35 | Save the device twin after the change. Repeat this process for any additional IoT Edge devices that should be targeted with the pipeline. 36 | 37 | ## Forking the repository 38 | 39 | The DevOps pipeline details for the sample are included in [the GitHub repository](https://github.com/AzureIoTGBB/iot-edge-offline-dashboarding). 40 | 41 | Do a GitHub [fork](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) of the repository to your own workspace. You can then make changes to the pipeline configuration there, for example to the target conditions. 42 | 43 | ## Setting up an Azure DevOps organization and project 44 | 45 | An Azure DevOps pipeline is always part of a [project](https://docs.microsoft.com/en-us/azure/devops/organizations/projects/create-project?view=azure-devops&tabs=preview-page), which is part of an [organization](https://docs.microsoft.com/en-us/azure/devops/organizations/accounts/create-organization?view=azure-devops). Follow the instructions on the given websites, but skip the 'Add a Repository to your Project' part since this is managed on GitHub. 46 | 47 | Before adding the pipeline there are two project-level preliminary tasks. 48 | 49 | ### Install the GitVersion add-in 50 | 51 | [GitVersion](https://marketplace.visualstudio.com/items?itemName=gittools.usegitversion) can be used to automatically derive image version tags from a repository. Use the "Get it free" button on the link above to install the add-in into the organization. 52 | 53 | ### Create a service connection to Azure 54 | 55 | A service connection to Azure allows DevOps to push images and create deployments for an Azure subscription. 56 | 57 | * In the lower left corner of the pipeline settings, choose "Project Settings" 58 | * From the left navigation, choose "Service Connections" 59 | * Click "New Service Connection" 60 | * Choose "Azure Resource Manager" and hit "next" 61 | * Choose "Service Principal (automatic)" then "next" 62 | * Choose an Azure subscription from the dropdown 63 | * (For environments where you may not have subscription-level permissions, you may have to also select the specific Resource Group where you deployed your IoT Hub and ACR instance) 64 | * Add a name for the service connection and hit Save 65 | 66 | ## Creating the DevOps pipeline 67 | 68 | Click on Pipelines from the left-nav and then select "Create Pipeline". 69 | 70 | * From the "Where is your code?" screen, choose GitHub 
71 | * You may see a screen asking for authentication: "Authenticate to authorize access" 72 | * From the "Select a repository" screen, select the fork created above 73 | * Select "Approve & Install Azure Pipelines" if required 74 | * From the "review your pipeline" screen, click the down-arrow next to Run and click "Save" - note that a number of variables need to be added before the first run 75 | 76 | ### Set the pipeline environment variables 77 | 78 | To make the pipeline as generic as possible, much of the config is supplied in the form of environment variables. To add these variables, click on "Variables" in the upper right hand corner of the "Edit Pipeline" screen. Add the following variables and values: 79 | 80 | * ACR_NAME: The 'short name' of your Azure Container Registry (the part before .azurecr.io) 81 | * ACR_RESOURCE_GROUP: The name of the resource group in Azure that contains the Azure Container Registry 82 | * AZURE_SERVICE_CONNECTION: The name of the Azure service connection created above 83 | * AZURE_SUBSCRIPTION_ID: The ID of the used Azure subscription 84 | * GRAFANA_ADMIN_PASSWORD: The desired administrator password for the Grafana dashboard web app when deployed 85 | * IOT_HUB_NAME: The name of the connected Azure IoT Hub (short name, without the .azure-devices.net) 86 | * DEPLOYMENT_TARGET_CONDITION: The target condition to use for the deployment. This matches the tag set in the Edge device's device twin. Based on the tag used above, the value would be 'tags.dashboard=true'. 87 | * Click "Save" 88 | 89 | ## Executing the pipeline 90 | 91 | The pipeline is set to trigger on commits to the master branch of the GitHub repository. However, for testing, it can be run manually. 92 | 93 | Click on "Run" in the upper right hand corner to start the manual execution of the pipeline. The pipeline has "Build" and "Release" stages. Click on the "Build" stage to open the detail view while running. 94 | 95 | ## Verify successful deployment 96 | 97 | SSH into your IoT Edge box and run: 98 | 99 | ```bash 100 | sudo iotedge list 101 | ``` 102 | 103 | Confirm that all modules have been deployed. Note that it might take several minutes to deploy each module, depending on the speed of each Edge box's Internet connection. 104 | 105 | Once confirmed that the modules are running, return to the [page on Grafana Dashboards](/documentation/dashboarding-sample.md#view-the-grafana-dashboard) to see and customize the dashboard. 106 | 107 | ## See also 108 | 109 | * [Deploying manually](deployment-manual.md) 110 | * [Deploying via VSCode](deployment-vscode.md) 111 | -------------------------------------------------------------------------------- /documentation/deployment-manual.md: -------------------------------------------------------------------------------- 1 | # Deploy the IoT Offline Dashboarding sample manually 2 | 3 | This document describes how to deploy the IoT Offline Dashboarding sample manually on an Edge device. Choose this option to gain a deep understanding of every required step involved in deploying the sample. 
4 | 5 | **Table of contents** 6 | - [Deploy the IoT Offline Dashboarding sample manually](#deploy-the-iot-offline-dashboarding-sample-manually) 7 | - [Preparation of Edge boxes and IoT Hub](#preparation-of-edge-boxes-and-iot-hub) 8 | - [Building all Edge module images](#building-all-edge-module-images) 9 | - [Deploying all Edge modules](#deploying-all-edge-modules) 10 | - [Adding the Edge modules](#adding-the-edge-modules) 11 | - [Adding the routes](#adding-the-routes) 12 | - [Deploying modules to devices](#deploying-modules-to-devices) 13 | - [See also](#see-also) 14 | 15 | ## Preparation of Edge boxes and IoT Hub 16 | 17 | **Please start by setting up your Edge devices by going through the [edge environment setup](setup-edge-environment.md) document.** 18 | 19 | ## Building all Edge module images 20 | 21 | Before any Edge modules can be deployed, it is necessary to build the module images using the Dockerfiles found in the repository. Once built, the images need to be placed into a container registry. 22 | 23 | Start by cloning [the GitHub repository](https://github.com/AzureIoTGBB/iot-edge-offline-dashboarding) to a machine that has Docker installed (possibly the IoT Edge VM created above, or a local development machine). 24 | 25 | ```bash 26 | git clone https://github.com/AzureIoTGBB/iot-edge-offline-dashboarding.git 27 | ``` 28 | 29 | Replace `{registry}` in the commands below with the container registry location created earlier (e.g. myregistry.azurecr.io). 30 | 31 | ```bash 32 | sudo docker login {registry} 33 | 34 | cd iot-edge-offline-dashboarding/modules/edgetoinfluxdb 35 | sudo docker build --tag {registry}/edgetoinfluxdb:1.0 . 36 | sudo docker push {registry}/edgetoinfluxdb:1.0 37 | 38 | cd ../grafana 39 | sudo docker build --tag {registry}/grafana:1.0 . 40 | sudo docker push {registry}/grafana:1.0 41 | 42 | cd ../influxdb 43 | sudo docker build --tag {registry}/influxdb:1.0 . 44 | sudo docker push {registry}/influxdb:1.0 45 | 46 | cd ../opcpublisher 47 | sudo docker build --tag {registry}/opcpublisher:1.0 . 48 | sudo docker push {registry}/opcpublisher:1.0 49 | 50 | cd ../opcsimulator 51 | sudo docker build --tag {registry}/opcsimulator:1.0 . 52 | sudo docker push {registry}/opcsimulator:1.0 53 | ``` 54 | 55 | ## Deploying all Edge modules 56 | 57 | All five module images should now be in a container registry. Instances of these module images can now be deployed to an Edge machine using IoT Hub. 58 | 59 | Navigate to the desired IoT Hub instance in the Azure portal and select "IoT Edge". All registered Edge devices should be visible. Click on the desired Edge device and click "Set Modules." Under "Container Registry Credentials", enter the name, address, user name and password of the container registry used when [building the Edge module images](#building-all-edge-module-images). 60 | 61 | ### Adding the Edge modules 62 | 63 | In the "IoT Edge Modules" section, click the "+ Add" button and select "IoT Edge Module". For "IoT Edge Module Name" enter `"edgetoinfluxdb"` and for "Image URI" enter `"{registry}/edgetoinfluxdb:1.0"`. Be sure to replace `{registry}` with the registry address defined above. Switch to the "Container Create Options" tab and place the following JSON into the create options field: 64 | 65 | ```json 66 | { 67 | "HostConfig": { 68 | "PortBindings": { 69 | "1880/tcp": [ 70 | { 71 | "HostPort": "1881" 72 | } 73 | ] 74 | } 75 | } 76 | } 77 | ``` 78 | 79 | Click the "Add" button to finish creating the module. 
This needs to be repeated for the four remaining modules. The following are the property values for each module. Note: the `{password}` placeholder for the GF_SECURITY_ADMIN_PASSWORD environment variable represents the admin password that will be used to log into the Grafana dashboards once deployment is complete.
80 |
81 | **Module grafana:**
82 |
83 | ```json
84 | IoT Edge Module Name: grafana
85 | Image URI: {registry}/grafana:1.0
86 | Environment Variable:
87 |   Name: GF_SECURITY_ADMIN_PASSWORD
88 |   Value: {password}
89 | Container Create Options:
90 | {
91 |   "HostConfig": {
92 |     "PortBindings": {
93 |       "3000/tcp": [
94 |         {
95 |           "HostPort": "3000"
96 |         }
97 |       ]
98 |     }
99 |   }
100 | }
101 | ```
102 |
103 | **Module influxdb:**
104 |
105 | ```json
106 | IoT Edge Module Name: influxdb
107 | Image URI: {registry}/influxdb:1.0
108 | Container Create Options:
109 | {
110 |   "HostConfig": {
111 |     "Binds": [
112 |       "/influxdata:/var/lib/influxdb"
113 |     ],
114 |     "PortBindings": {
115 |       "8086/tcp": [
116 |         {
117 |           "HostPort": "8086"
118 |         }
119 |       ]
120 |     }
121 |   }
122 | }
123 | ```
124 |
125 | **Module opcpublisher:**
126 |
127 | ```json
128 | IoT Edge Module Name: opcpublisher
129 | Image URI: {registry}/opcpublisher:1.0
130 | Container Create Options:
131 | {
132 |   "Hostname": "publisher",
133 |   "Cmd": [
134 |     "--pf=/app/pn.json",
135 |     "--aa"
136 |   ]
137 | }
138 | ```
139 |
140 | **Module opcsimulator:**
141 |
142 | ```json
143 | IoT Edge Module Name: opcsimulator
144 | Image URI: {registry}/opcsimulator:1.0
145 | Container Create Options:
146 | {
147 |   "HostConfig": {
148 |     "PortBindings": {
149 |       "1880/tcp": [
150 |         {
151 |           "HostPort": "1880"
152 |         }
153 |       ]
154 |     }
155 |   }
156 | }
157 | ```
158 |
159 | The "Set modules" dialog should now look like this:
160 |
161 | ![Edge Modules](../media/edge-modules.png)
162 |
163 | ### Adding the routes
164 |
165 | Next, click on the "Routes" tab and add the following route with the name "opc":
166 |
167 | ```bash
168 | FROM /messages/modules/opcpublisher/* INTO BrokeredEndpoint("/modules/edgetoinfluxdb/inputs/input1")
169 | ```
170 |
171 | ![Edge Routes](../media/edge-routes.png)
172 |
173 | ### Deploying modules to devices
174 |
175 | Click the "Review + Create" button and then select the "Create" button. This will start the deployment. Assuming all goes well, the modules will be running after several minutes. The "IoT Edge Runtime Response" should be "200 -- Ok" and the module runtime status "running".
176 |
177 | ![Edge Success](../media/edge-success.png)
178 |
179 | Once confirmed that the modules are running, return to the [page on Grafana Dashboards](/documentation/dashboarding-sample.md#view-the-grafana-dashboard) to see and customize the dashboard.
180 |
181 | ## See also
182 |
183 | * [Deploying via Azure DevOps pipelines](deployment-devops.md)
184 | * [Deploying via VSCode](deployment-vscode.md)
185 |
-------------------------------------------------------------------------------- /documentation/deployment-vscode.md:
--------------------------------------------------------------------------------
1 | # Deploy the IoT Offline Dashboarding sample via VSCode
2 |
3 | This document shows how to deploy the IoT Offline Dashboarding sample via [VS Code](https://code.visualstudio.com/). To reduce the number of prerequisites and tools to install, this repository takes advantage of the "[remote container](https://code.visualstudio.com/docs/remote/containers)" support in VS Code.
It leverages the same IoT Edge Dev tools that the IoT Edge extensions (and Azure DevOps) use under the hood, without having to install them or their prerequisites locally.
4 |
5 | **Table of contents**
6 | * [Install prerequisites](#install-prerequisites)
7 | * [Clone and open the repository](#clone-and-open-the-repository)
8 | * [Configuring the sample](#configuring-the-sample)
9 | * [Build and push the sample images](#build-and-push-the-sample-images)
10 | * [Deploy to an IoT Edge device](#deploy-to-an-iot-edge-device)
11 | * [See also](#see-also)
12 |
13 | ## Install prerequisites
14 |
15 | Please install the following prerequisites:
16 |
17 | * [Visual Studio Code](https://code.visualstudio.com/Download)
18 | * [Docker](https://docs.docker.com/get-docker/)
19 | * [Git](https://git-scm.com/downloads)
20 |
21 | Once VS Code is running, click on "Extensions" in the left border navigation. Search for `remote-containers` and click "Install".
22 |
23 | A Docker-compatible container registry is needed to hold the built Docker images. If not already available, set up an Azure Container Registry with these [instructions](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-azure-cli#create-a-container-registry). Once created, navigate to the "Access Keys" blade in the left navigation of the container registry settings and note down the username and password.
24 |
25 | **Please also set up your Edge devices by going through the [edge environment setup](setup-edge-environment.md) document.**
26 |
27 | ## Clone and open the repository
28 |
29 | Open VS Code and click on the "Source Control" icon in the left border navigation.
30 |
31 | !["Source Control" icon in the left border navigation](/media/vscode-source-control.jpg)
32 |
33 | Select "Clone Repository". Paste the URL of this [repository](https://github.com/AzureIoTGBB/iot-edge-offline-dashboarding) into the top input bar and confirm.
34 |
35 | Choose a folder into which to clone the repository and open it after the download is complete.
36 |
37 | When opening the repository, VS Code will recognize that it includes a development container and ask whether the solution should be re-opened in that container. Choose yes.
38 |
39 | VS Code will proceed to start all development containers. Click on the notification to see the progress in the output window. Confirm each request to share the repository and .azure folders.
40 |
41 | After several minutes, the project will be opened in the development container.
42 |
43 | ## Configuring the sample
44 |
45 | Before the sample can be built and pushed to an Edge device, it needs to be configured.
46 |
47 | ### Defining the IoT Hub connection string
48 |
49 | Find the "AZURE IOT HUB" section in the left "Explorer" bar of VS Code and expand it. Select "More Actions" (...) and choose "Set IoT Hub Connection String" for the IoT Hub [created earlier](setup-edge-environment.md).
50 |
51 | Alternatively, retrieve the `'iothubowner'` policy connection string with the following command from the Azure CLI:
52 |
53 | ```bash
54 | az iot hub show-connection-string -n {iot hub name} --policy-name iothubowner
55 | ```
56 |
57 | Paste this connection string into VS Code.
58 |
59 | > [!NOTE]
60 | > This connection string is the primary authentication to an IoT Hub and should be kept secret.
61 |
62 | ### Set environment variables
63 |
64 | A number of environment variables are required to build, push and deploy the sample.
If not already auto-generated, create a new file called `".env"` in the project root directory. Paste the following environment variables into the empty file. Make sure to enter your specific values:
65 |
66 | ```bash
67 | CONTAINER_REGISTRY_USERNAME={container registry username}
68 | CONTAINER_REGISTRY_PASSWORD={container registry password}
69 | CONTAINER_REGISTRY_ADDRESS={container registry address}
70 |
71 | GRAFANA_ADMIN_PASSWORD={desired grafana password}
72 |
73 | CONTAINER_VERSION_TAG={image tag}
74 | ```
75 |
76 | About these environment variables:
77 | * The first three variables are the credentials and address of the container registry. In case of an Azure Container Registry, these can be found on the "Access Keys" blade
78 | * GRAFANA_ADMIN_PASSWORD is the desired administrative password for the Grafana dashboard (the default username is 'admin')
79 | * CONTAINER_VERSION_TAG is the version tag for the created Docker images (e.g. '0.0.1' or '1.0.0', etc). This is used as the version tag on all created images, appended with the processor architecture (e.g. myacr.azurecr.io/opcsimulator:0.0.1-amd64)
80 |
81 | Save the new .env file.
82 |
83 | ## Build and push the sample images
84 |
85 | With the setup done, building and pushing the images is straightforward.
86 |
87 | Open a terminal window (CTRL+SHIFT+`) and run the following:
88 |
89 | ```bash
90 | docker login {container registry address} -u {user name} -p {password}
91 | ```
92 |
93 | If the login is successful, right-click on the `deployment.template.json` file and choose "Build and Push IoT Edge Solution".
94 |
95 | The Docker images for the sample will be built and pushed to your specified container registry.
96 |
97 | ## Deploy to an IoT Edge device
98 |
99 | To deploy the new images to your IoT Edge box:
100 |
101 | * Expand the "AZURE IOT HUB" pane from the bottom of the left Explorer view in VS Code
102 | * Navigate to the desired IoT Edge device
103 | * Right-click and choose "Create Deployment for a single device"
104 | * Navigate to the config folder, choose `deployment.amd64.json` and "Select Edge Deployment Manifest"
105 |
106 | The deployment will be submitted to the IoT Hub, which pushes the deployment manifest to your IoT Edge device. To confirm the modules are created and active, run the following command on your Edge device:
107 |
108 | ```bash
109 | sudo iotedge list
110 | ```
111 |
112 | Confirm that all modules have been deployed. Note that it might take several minutes to deploy each module, depending on the speed of each Edge box's Internet connection.
113 |
114 | Once confirmed that the modules are running, return to the [page on Grafana Dashboards](/documentation/dashboarding-sample.md#view-the-grafana-dashboard) to see and customize the dashboard.
115 |
116 | ## See also
117 |
118 | * [Deploying manually](deployment-manual.md)
119 | * [Deploying via Azure DevOps pipelines](deployment-devops.md)
120 |
-------------------------------------------------------------------------------- /documentation/documentation-guide.md:
--------------------------------------------------------------------------------
1 | # Documentation guidelines
2 |
3 | This document outlines the documentation guidelines and standards. It provides an introduction to the technical aspects of writing and generating documentation, highlights common pitfalls, and describes the recommended writing style.
4 |
5 | The page itself is supposed to serve as an example; therefore it uses the intended style and the most common markup features of the documentation.
6 |
7 | * [Source](#source-documentation)
8 | * [How-to](#how-to-documentation)
9 |
10 | ---
11 |
12 | ## Functionality and markup
13 |
14 | This section describes frequently needed features. To see how they work, look at the source code of the page.
15 |
16 | 1. Numbered lists
17 |    1. Nested numbered lists with at least 3 leading blank spaces
18 |    1. The actual number in code is irrelevant; parsing will take care of setting the correct item number
19 |    1. This way removing or adding a line in between lists will not require updating each number
20 |
21 | * Bullet point lists
22 |   * Nested bullet point lists
23 | * Text in **bold** with \*\*double asterisk\*\*
24 | * _italic_ *text* with \_underscore\_ or \*single asterisk\*
25 | * Text `highlighted as code` within a sentence \`using backquotes\`
26 | * Links to docs pages [documentation guidelines](documentation-guide.md)
27 | * Links to [anchors within a page](#style); anchors are formed by replacing spaces with dashes, and converting to lowercase
28 |
29 | For code samples, use blocks with three backticks \`\`\` and specify the language for syntax highlighting:
30 |
31 | ```javascript
32 | function sampleFunction (i) {
33 |   return i + 2;
34 | }
35 | ```
36 |
37 | When mentioning code within a sentence `use a single backtick`.
38 |
39 | ### TODOs
40 |
41 | Avoid using TODOs in docs or in code, as over time these TODOs tend to accumulate and information about how they should be updated and why gets lost.
42 |
43 | If it is absolutely necessary to add a TODO, follow these steps:
44 |
45 | 1. File a new issue on GitHub describing the context behind the TODO, and provide enough background that another contributor would be able to understand and then address the TODO.
46 | 1. Reference the issue URL in the TODO in the docs.
47 |
48 | > TODO (https://github.com/AzureIoTGBB/iot-edge-offline-dashboarding/issues/ISSUE_NUMBER_HERE): A brief blurb on the issue
49 |
50 | ### Highlighted sections
51 |
52 | To highlight specific points to the reader, use *> [!NOTE]* , *> [!WARNING]* , and *> [!IMPORTANT]* to produce the following styles. It is recommended to use notes for general points, and warning/important points only for special relevant cases.
53 |
54 | > [!NOTE]
55 | > Example of a note
56 |
57 | > [!WARNING]
58 | > Example of a warning
59 |
60 | > [!IMPORTANT]
61 | > Example of an important comment
62 |
63 | ## Page layout
64 |
65 | ### Headline
66 |
67 | There should be only one first-level headline per page, acting as the main title.
68 |
69 | If required, add a short introduction describing what the page is about. Do not make this too long; instead, add sub-headlines. These allow linking to sections and can be saved as bookmarks.
70 |
71 | ### Main body
72 |
73 | Use two-level and three-level headlines to structure the rest.
74 |
75 | **Mini Sections**
76 |
77 | Use a bold line of text for blocks that should stand out. These might be replaced by four-level headlines at some point.
78 |
79 | ### 'See also' section
80 |
81 | Some pages might end with a chapter called *See also*. This chapter is simply a bullet-pointed list of links to pages related to this topic. These links may also appear within the page text where appropriate, but this is not required. Similarly, the page text may contain links to pages that are not related to the main topic; these should not be included in the *See also* list.
See [this page's 'See also' chapter](#see-also) as an example for the choice of links.
82 |
83 | ## Style
84 |
85 | ### Writing style
86 |
87 | General rule of thumb: Try to **sound professional**. That usually means avoiding a 'conversational tone'. Also try to avoid hyperbole and sensationalism.
88 |
89 | 1. Don't try to be (overly) funny.
90 | 2. Never write 'I'.
91 | 3. Avoid 'we'. This can usually be rephrased easily, using 'This sample' instead. Example: "we support this feature" -> "This sample supports this feature" or "the following features are supported ...".
92 | 4. Similarly, try to avoid 'you'. Example: "With this simple change the dashboard becomes configurable!" -> "Dashboards can be made configurable with little effort."
93 | 5. Do not use 'sloppy phrases'.
94 | 6. Avoid sounding overly excited; there is no need to sell anything.
95 | 7. Similarly, avoid being overly dramatic. Exclamation marks are rarely needed.
96 |
97 | ### Capitalization
98 |
99 | * Use **Sentence case for headlines**. I.e. capitalize the first letter and names, but nothing else.
100 | * Use regular English for everything else. That means **do not capitalize arbitrary words**, even if they hold a special meaning in that context. Prefer *italic text* for highlighting certain words, [see below](#emphasis-and-highlighting).
101 | * When a link is embedded in a sentence (which is the preferred method), the standard chapter name always uses capital letters, thus breaking the rule of no arbitrary capitalization inside text. Therefore use a custom link name to fix the capitalization. As an example, here is a link to the [deployment manual](deployment-manual.md) documentation.
102 | * Do capitalize names, such as *Azure*.
103 |
104 | ### Emphasis and highlighting
105 |
106 | There are two ways to emphasize or highlight words: making them bold or making them italic. The effect of bold text is that **bold text sticks out** and therefore can easily be noticed while skimming a piece of text or even just scrolling over a page. Bold is great to highlight phrases that people should remember. However, **use bold text rarely**, because it is generally distracting.
107 |
108 | Often one wants to either 'group' something that belongs logically together or highlight a specific term, because it has a special meaning. Such things do not need to stand out of the overall text. Use italic text as a *lightweight method* to highlight something.
109 |
110 | Similarly, when a filename, a path or a menu-entry is mentioned in text, prefer to make it italic to logically group it, without being distracting.
111 |
112 | In general, try to **avoid unnecessary text highlighting**. Special terms can be highlighted once to make the reader aware; do not repeat such highlighting throughout the text when it no longer serves a purpose and only distracts.
113 |
114 | ### Links
115 |
116 | Insert as many useful links to other pages as possible, but each link only once. Assume a reader clicks on every link in the page, and think about how annoying it would be if the same page opens 20 times.
117 |
118 | Prefer links embedded in a sentence:
119 |
120 | * BAD: Guidelines are useful. See [this chapter](documentation-guide.md) for details.
121 | * GOOD: [Guidelines](documentation-guide.md) are useful.
122 |
123 | When adding a link, consider whether it should also be listed in the [See also](#see-also) section. Similarly, check whether a link to the new page should be added to the linked-to page.
124 |
125 | ## Page completion checklist
126 |
127 | 1.
Ensure that this document's guidelines were followed.
128 | 1. Browse the document structure and see if the new document could be mentioned under the [See also](#see-also) section of other pages.
129 | 1. If available, have someone with knowledge of the topic proof-read the page for technical correctness.
130 | 1. Have someone proof-read the page for style and formatting. This can be someone unfamiliar with the topic; this is also a good way to get feedback on how understandable the documentation is.
131 |
132 | ## Tools for editing Markdown
133 |
134 | [Visual Studio Code](https://code.visualstudio.com/) is a great tool for editing markdown files.
135 |
136 | When writing documentation, installing the following two extensions is also highly recommended:
137 |
138 | - Docs Markdown Extension for Visual Studio Code - Use Alt+M to bring up a menu of docs authoring options.
139 |
140 | - Code Spell Checker - misspelled words will be underlined; right-click on a misspelled word to change it or save it to the dictionary.
141 |
142 | Both of these come packaged in the Microsoft-published Docs Authoring Pack.
143 |
144 | ## See also
145 |
146 | - [Microsoft Docs contributor guide overview](https://docs.microsoft.com/en-us/contribute/)
147 |
-------------------------------------------------------------------------------- /documentation/manufacturing-kpis.md:
--------------------------------------------------------------------------------
1 |
2 | # IoT Offline Dashboarding sample OEE drilldown
3 |
4 | As discussed in the [readme](/readme.md), this sample is based around a dashboard to display machine performance KPIs in a manufacturing environment. In particular, the sample includes the necessary data collection, calculations, and visualization of the Overall Equipment Effectiveness (OEE) metric, common to manufacturers.
5 |
6 | For more information on the chosen scenario as well as alternative options, please see [the sample documentation](dashboarding-sample.md).
7 |
8 | **Table of contents**
9 | * [KPI types](#kpi-types)
10 | * [Sample data sources and data flow](#sample-data-sources-and-data-flow)
11 | * [Site Level Performance dashboard](#site-level-performance-dashboard)
12 |
13 | ## KPI types
14 |
15 | ### Performance
16 |
17 | The Performance KPI indicates whether or not a machine is working as intended and achieving its desired output. It is calculated as:
18 |
19 | ```html
20 | Performance = (Good Items Produced/Total Time Machine was Running)/(Ideal Rate of Production)
21 | ```
22 |
23 | "Ideal Rate of Production" is the expected rate of production and is provided as a parameter to the dashboards. The unit of the Performance KPI is a percentage (%).
24 |
25 | ### Quality
26 |
27 | Quality is the ratio of items produced by the machine that pass quality checks over all items produced, assuming that there will be items that fail the quality bar. It is calculated as:
28 |
29 | ```html
30 | Quality = (Good Items Produced)/(Good Items Produced + Bad Items Produced)
31 | ```
32 |
33 | The unit for the Quality KPI is a percentage (%).
34 |
35 | ### Availability
36 |
37 | Availability is defined as the percentage of time the machine was available. Normally, this does not include any planned downtime; however, for the sake of simplicity, the sample assumes that the factory operates 24x7.
38 |
39 | The calculation is as follows:
40 |
41 | ```html
42 | Availability = (Running Time)/(Running Time + Idle Time)
43 | ```
44 |
45 | The unit for the Availability KPI is a percentage (%).
46 |
47 | ### Overall Equipment Effectiveness (OEE)
48 |
49 | Finally, OEE is a higher-level KPI that is calculated from the other KPIs above and depicts the overall equipment effectiveness within the manufacturing process:
50 |
51 | ```html
52 | OEE = Availability x Quality x Performance
53 | ```
54 |
55 | The unit for the OEE KPI is a percentage (%).
56 |
57 | ## Sample data sources and data flow
58 |
59 | The flow of data within the sample is depicted by the green arrows in the following diagram.
60 |
61 | ![Diagram showing the data flow of the sample](../media/dataflow.png)
62 |
63 | * Two simulators act as OPC servers.
64 | * OPC Publisher subscribes to three data points in the OPC servers.
65 | * Data collected by OPC Publisher is sent to the cloud (through the Edge Hub module) and, in parallel, is routed to the offline dashboards Node-RED module for processing.
66 | * The Node-RED module unifies the data format and writes the data into InfluxDB.
67 | * Grafana reads the data from InfluxDB and displays dashboards to operators and users.
68 | * OPC Publisher, the Node-RED module, InfluxDB and Grafana are all deployed as separate containers through the IoT Edge runtime.
69 | * For the sake of simplicity, the two OPC simulators are also deployed as Node-RED modules in a container through the IoT Edge runtime.
70 |
71 | ### OPC Simulator
72 |
73 | This sample solution uses an [OPC simulator](https://flows.nodered.org/node/node-red-contrib-opcua) to simulate a data flow coming from machines in a manufacturing environment.
74 |
75 | The OPC simulator is a flow implemented in Node-RED. Two simulators are used to simulate two different OPC servers connected to the same IoT Edge device.
76 |
77 | | OPC Simulator Flow 1 | OPC Simulator Flow 2 |
78 | | --------------------------------------------- | ---------------------------------------------- |
79 | | ![Node-RED simulator 1](../media/nodered_sim1.png) | ![Node-RED simulator 2](../media/nodered_sim2.png) |
80 |
81 | Both simulators use essentially the same template, but are differentiated by two settings, Product URI and port:
82 |
83 | | | Product URI | Port |
84 | | -------------------- | ----------- | ----- |
85 | | OPC Simulator Flow 1 | OPC-Site-01 | 54845 |
86 | | OPC Simulator Flow 2 | OPC-Site-02 | 54855 |
87 |
88 | The OPC simulators generate three data points:
89 |
90 | #### Data Point: STATUS
91 |
92 | STATUS indicates the current status of the device that the OPC server is connected to. STATUS values are randomly generated using the following rules:
93 |
94 | * The value changes in 10-minute intervals
95 | * The STATUS value is one of the following: 101, 105, 108, 102, 104, 106, 107, 109
96 | * STATUS values 101, 105, 108 indicate that the machine is running
97 | * STATUS values 102, 104, 106, 107, 109 indicate that the machine is not running
98 | * A random number generator ensures that the machine will be in the RUNNING state (i.e. STATUS 101, 105, 108) 90% of the time
99 |
100 | #### Data Point: ITEM_COUNT_GOOD
101 |
102 | ITEM_COUNT_GOOD indicates the number of good items (products that pass quality tests) produced by the machine since the last data point. It is a random integer between 80 and 120. The simulators generate item counts every 5 seconds. This could be in any unit, but the sample regards it as "number of items".
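To make the generation rules above concrete, the following is a minimal JavaScript sketch of how STATUS and ITEM_COUNT_GOOD values could be produced according to those rules. This is an illustration only, not the Node-RED flow shipped with the sample:

```javascript
// Illustration only: value generation following the simulator rules above.
const RUNNING = [101, 105, 108];
const NOT_RUNNING = [102, 104, 106, 107, 109];

function pick(arr) {
  return arr[Math.floor(Math.random() * arr.length)];
}

// STATUS: regenerated on a 10-minute cycle, RUNNING ~90% of the time
function nextStatus() {
  return Math.random() < 0.9 ? pick(RUNNING) : pick(NOT_RUNNING);
}

// ITEM_COUNT_GOOD: random integer between 80 and 120, emitted every 5 seconds
function nextItemCountGood() {
  return 80 + Math.floor(Math.random() * 41);
}

setInterval(() => console.log("STATUS:", nextStatus()), 10 * 60 * 1000);
setInterval(() => console.log("ITEM_COUNT_GOOD:", nextItemCountGood()), 5000);
```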
103 |
104 | #### Data Point: ITEM_COUNT_BAD
105 |
106 | ITEM_COUNT_BAD indicates the number of bad items (ITEMS_DISCARDED) produced by the machine since the last data point. It is a random integer between 0 and 10. The simulators generate item counts every 5 seconds. This could be in any unit, but the sample regards it as "number of items".
107 |
108 | ### Data Processing Module (Node-RED)
109 |
110 | Data collected from the simulators by the OPC Publisher module is sent to the Node-RED module for processing. The Node-RED module validates the data, converts it to a suitable format, and writes it to InfluxDB.
111 |
112 | During processing, the Application URI value is extracted from the JSON data and written to the "Source" tag in the database schema.
113 |
114 | ### Database (InfluxDB)
115 |
116 | All collected data flows into a single measurement (DeviceData) in a single database (telemetry) in InfluxDB. The measurement "DeviceData" has 3 fields and 1 tag:
117 |
118 | **Fields:**
119 |
120 | * STATUS: float
121 | * ITEM_COUNT_GOOD: float
122 | * ITEM_COUNT_BAD: float
123 |
124 | **Tags:**
125 |
126 | * Source
127 |
128 | Note that STATUS values are preserved as they come from the OPC server. These values are mapped within InfluxDB queries to determine whether the machine is running.
129 |
130 | ## Site Level Performance dashboard
131 |
132 | The Site Level Performance dashboard displays the manufacturing KPIs (OEE, Availability, Quality, Performance) per site.
133 |
134 | ![dashboard](../media/dashboard.png)
135 |
136 | **Site** is defined as the OPC server providing the data; the OPC server's Product URI is used as the site name (Source). See the Node-RED module code for the algorithm used to extract the Product URI from the Node Id.
137 |
138 | In a production implementation, the site name would correspond to specific equipment or an asset.
139 |
140 | The sample application defines two different sites, corresponding to the two OPC simulators.
141 |
142 | ![sitecombo](../media/sitecombo.png)
143 |
144 | **Ideal run rate** is the ideal capacity of production for the given equipment. It is used to calculate the Performance KPI. See the definition of the Performance KPI above for the calculation method.
145 |
146 | ![idealrunrate](../media/idealrunrate.png)
147 |
148 | Each row in the dashboard represents a KPI. The gauge on the left-hand side reports the KPI result for the selected time window. In the sample screenshot above, the time window is "Last 12 hours". Therefore the top-left gauge for the OEE KPI corresponds to a value of 54.23% over the last 12 hours.
149 |
150 | ![timeinterval](../media/timeinterval.png)
151 |
152 | In a production environment, operators would typically monitor KPIs for their current shift. To do that, the operator sets the period start time in line with their shift and leaves the end of the period as "now()":
153 |
154 | ![timeinterval2](../media/timeinterval2.png)
155 |
156 | Line graphs show indicators at 12:00AM, 08:00AM and 04:00PM to highlight working shift changes on the fictional manufacturing floor.
157 |
158 | The following table depicts the details of each element in the dashboard:
159 |
160 | | Dashboard Element | Snapshot |
161 | | ------------------------------------------------------------ | :-----------------------------------------------: |
162 | | The OEE gauge shows the Overall Equipment Effectiveness for the selected time period. | ![OEEgauge](../media/OEEgauge.png) |
163 | | The OEE graph shows the change across the selected time period.
Minimum, Maximum and Average values of OEE across the time period are provided in the legend. | ![oeegraph](../media/oeegraph.png) |
164 | | The availability gauge shows Availability for the selected time period. | ![availabilitygauge](../media/availabilitygauge.png) |
165 | | The availability graph shows value changes over the selected time period. Minimum, Maximum and Average values of Availability across the time period are provided in the legend. The blue line indicates when a machine was actually running. | ![availabilitygraph](../media/availabilitygraph.png) |
166 | | The quality gauge shows Quality for the selected time period. | ![qualitygauge](../media/qualitygauge.png) |
167 | | The quality graph shows the value change over the selected time period on the left axis. It also shows the number of "Good Items" produced (items that are properly manufactured) as a green line, as well as "Bad Items" produced (items that are discarded) as a red line. Note that the "Good Items" and "Bad Items" are aggregated at the minute level and their unit is "number of items per minute". The "Ideal Run Rate" parameter value, entered manually at the top of the dashboard, is shown as a reference aligned to the right axis. Minimum, Maximum and Average values of Quality, Good Items and Bad Items are provided in the legend. | ![qualitygraph](../media/qualitygraph.png) |
168 | | The performance gauge shows Performance for the selected time period. | ![performancegauge](../media/performancegauge.png) |
169 | | The performance graph shows the change in Performance for the selected time period. Minimum, Maximum and Average values of Performance across the time period are provided in the legend. The "Ideal Run Rate" parameter value, entered manually at the top of the dashboard, is shown as a reference line, again aligned to the right axis. | ![performancegraph](../media/performancegraph.png) |
170 |
171 | ### Building the "Site Level Performance" dashboard
172 |
173 | #### Variables
174 |
175 | `$idealRunrate` is defined as a constant value (1400), which indicates the ideal throughput of the equipment/asset. This variable is used to calculate Performance and Quality.
176 |
177 | `$Source` is defined as a query against the InfluxDB database, which pulls the available "Source" tags from the "DeviceData" measurement. This is essentially a list of the assets that have measurements in the selected time range (`$range`).
178 |
179 | ```
180 | from(bucket: "telemetry")
181 |   |> range($range)
182 |   |> filter(fn: (r) =>r._measurement == "DeviceData")
183 |   |> keep(columns:["Source"])
184 |   |> distinct(column: "Source")
185 | ```
186 |
187 | #### Panels
188 |
189 | The dashboard contains 8 panels, which are built on similar Flux queries against the same data source. The most comprehensive of these queries is used in the "OEE History" panel, since calculating the OEE involves processing the other KPIs. See [KPI types](#kpi-types) above.
190 |
191 | ![OEE History panel](../media/oeegraph.png)
192 |
193 | The following section examines the Flux query for the "OEE History" panel in detail.
194 |
195 | ```
196 | import "math"
197 |
198 | import "csv"
199 | ```
200 |
201 | Define that the STATUS values 101, 105 and 108 will be considered as "asset is in RUNNING state".
202 |
203 | ```
204 | StatusValuesForOn = [101,105,108]
205 | ```
206 |
207 | The `fGetLastStatus` function finds the last STATUS value in the relevant time range. The simulator changes the STATUS value every 10 minutes; however, the OPC server does not publish it unless the new value is different from the previous one.
At any time, the STATUS value should be considered the "last set value".
208 |
209 | ```
210 | fGetLastStatus = () => {
211 | ```
212 |
213 | A dummy record is defined to avoid errors from the `tableFind` function later.
214 |
215 | ```
216 | dummyRecordCsv = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string\n#group,false,false,true,true,false,false,true,true,true\n#default,_result,,,,,,,,\n,result,table,_start,_stop,_time,_value,Source,_field,_measurement\n,,0,2030-01-01T00:00:00.0Z,2030-01-01T00:00:00.0Z,2030-01-01T00:00:00.0Z,0,,STATUS,DeviceData"
217 |
218 | dummyRecord=csv.from(csv: dummyRecordCsv )
219 | ```
220 |
221 | Find the first STATUS value in the currently selected time range and for the selected asset (source).
222 |
223 | ```
224 | firstStatusTimestampTemp=
225 |   from(bucket: "telemetry")
226 |   |> range([[range]])
227 |   |> filter(fn: (r) => r._measurement == "DeviceData" )
228 |   |> filter(fn: (r) => r.Source == "[[Source]]" )
229 |   |> filter(fn: (r) => r._field == "STATUS" )
230 |   |> first()
231 | ```
232 |
233 | Then union the result with the dummy record so that the result will have at least one table, even if there aren't any STATUS values in the current range.
234 |
235 | ```
236 | firstStatusTimestamp=
237 |   union(tables: [firstStatusTimestampTemp,dummyRecord])
238 |   |> tableFind(fn: (key) => key._field == "STATUS" )
239 |   |> getColumn(column: "_time")
240 | ```
241 |
242 | Search for the last (latest) STATUS value before the first (oldest) STATUS value in the selected time range. Note that in later versions of the Grafana connector (7.0+) for Flux, it isn't required to search for the first value in the range.
243 |
244 | ```
245 | lastStatusBeforeRangeTemp=
246 |   from(bucket: "telemetry")
247 |   |> range(start:-1000d, // Flux queries have to have the start of a range,
248 |                          // make sure start of range is old enough to cover all cases
249 |     stop: time(v:uint(v:firstStatusTimestamp[0])-uint(v:1)))
250 |   |> filter(fn: (r) => r._measurement == "DeviceData" )
251 |   |> filter(fn: (r) => r.Source == "[[Source]]" )
252 |   |> map(fn: (r) => ({
253 |     _field: "STATUS",
254 |     _time: r._time,
255 |     _value: float(v: contains(value: int(v: r._value),
256 |       set : StatusValuesForOn ))//If STATUS value is one of StatusValuesForOn
257 |                                 // then return 1.0f else 0.0f
258 |   }))
259 |   |> last()
260 | ```
261 |
262 | Again, union the result with a dummy record so that the result will have at least one table.
263 |
264 | ```
265 | lastStatusBeforeRange=
266 |   union(tables: [lastStatusBeforeRangeTemp,dummyRecord])
267 |   |> tableFind(fn: (key) => key._field == "STATUS" )
268 |   |> getColumn(column: "_value")
269 | ```
270 |
271 | The following will return the latest STATUS value (as 1 or 0).
272 |
273 | ```
274 | return lastStatusBeforeRange[length(arr:lastStatusBeforeRange)-1]
275 | }
276 | ```
277 |
278 | Filter all fields (ITEM_COUNT_GOOD, ITEM_COUNT_BAD, STATUS) of the "DeviceData" measurement for the selected time range and the selected asset source. This will be the base query template.
279 |
280 | ```
281 | DeviceData=from(bucket: "telemetry")
282 |   |> range([[range]])
283 |   |> filter(fn: (r) => r._measurement == "DeviceData" )
284 |   |> filter(fn: (r) => r.Source == "[[Source]]" )
285 |   |> group() //remove any grouping
286 |   |> keep(columns: ["_time","_field","_value"])
287 | ```
288 |
289 | Extract the field ITEM_COUNT_GOOD, adding values in one-minute intervals and calculating the cumulative sum.
290 |
291 | ```
292 | ItemCountGoodData=
293 |   DeviceData
294 |   |> filter(fn: (r) => r._field == "ITEM_COUNT_GOOD" )
295 |   |> aggregateWindow(every: 1m, fn: sum)
296 |   |> cumulativeSum()
297 | ```
298 |
299 | Extract the field ITEM_COUNT_BAD, adding values in one-minute intervals and calculating the cumulative sum.
300 |
301 | ```
302 | ItemCountBadData=
303 |   DeviceData
304 |   |> filter(fn: (r) => r._field == "ITEM_COUNT_BAD" )
305 |   |> aggregateWindow(every: 1m, fn: sum)
306 |   |> cumulativeSum()
307 | ```
308 |
309 | Extract the field STATUS, converting the values to 0.0 or 1.0. STATUS values in the set `StatusValuesForOn` (see above) are mapped to 1.0, all others are mapped to 0.0.
310 |
311 | ```
312 | StatusData=
313 |   DeviceData
314 |   |> filter(fn: (r) => r._field == "STATUS")
315 |   |> map(fn: (r) => ({
316 |     _time: r._time,
317 |     _value: float(v: contains(value: int(v: r._value),
318 |       set: StatusValuesForOn ))
319 |
320 |   }))
321 | ```
322 |
323 | Extract the field STATUS again, averaging the values in one-minute intervals. If the average is > 0 for any one-minute interval, it is assumed that the asset is running within this interval.
324 |
325 | ```
326 | StatusDataWindowed=
327 |   StatusData
328 |   |> aggregateWindow(every: 1m, fn: mean) //calculate STATUS (1 or 0) for each 1 min window
329 |   |> fill(column: "_value", usePrevious: true) // if there are no values in any window, use previous status
330 |   |> fill(column: "_value", value: fGetLastStatus()) //if there's no previous STATUS (i.e number of null
331 |                                                      //records in the beginning of the range), calculate
332 |                                                      //the last status before range starts
333 |   |> map(fn: (r) => ({_time: r._time, _value: math.ceil(x: r._value)}))
334 | ```
335 |
336 | Calculate the cumulative sum over STATUS so that each row shows the minutes of uptime (= running) since the start of the range.
337 |
338 | ```
339 | RunningMins=
340 |   StatusDataWindowed
341 |   |> cumulativeSum()
342 | ```
343 |
344 | Calculate the cumulative sum over the negated STATUS so that each row shows the sum of idle minutes from the start of the range.
345 |
346 | ```
347 | IdleMins=
348 |   StatusDataWindowed
349 |   |> map(fn: (r) => ({
350 |     _value: 1.0 - r._value,
351 |     _time: r._time,
352 |   }))
353 |   |> cumulativeSum()
354 | ```
355 |
356 | The values above represent the base datasets used to calculate the KPIs.
357 |
358 | Using `cumulativeSum` with the calculations above allows calculating [all defined KPIs](#kpi-types) for the time range up to each row.
359 |
360 | For Availability, the result dataset contains a row for every minute from the start of the range, with a `_value` in every row that represents the respective availability result.
361 |
362 | ```
363 | Availability=
364 |   join( //join two datasets on _time, each dataset has a row for every minute from the start of range
365 |     tables: {on: RunningMins, off: IdleMins},
366 |     on: ["_time"]
367 |   )
368 |   |> map(fn: (r) => ({
369 |     _value: if r._value_on+r._value_off == 0.0 then 0.0
370 |       else r._value_on/(r._value_on+r._value_off), //calculate KPI
371 |     _time: r._time, // set time
372 |     _field: "Availability" //set field/KPI name
373 |   }))
374 | ```
375 |
376 | The same is true for Quality.
377 |
378 | ```
379 | Quality=
380 |   join( //join two datasets on _time, each dataset has a row for every minute from the start of range
381 |     tables: {good: ItemCountGoodData, bad: ItemCountBadData},
382 |     on: ["_time"]
383 |   )
384 |   |> map(fn: (r) => ({
385 |     _value: if r._value_good+r._value_bad == 0.0 then 0.0
386 |       else r._value_good/(r._value_good+r._value_bad), //calculate KPI
387 |     _time: r._time, // set time
388 |     _field: "Quality" //set field/KPI name
389 |   }))
390 | ```
391 |
392 | For Performance, `$idealRunrate` represents the asset's ideal production capacity.
393 |
394 | ```
395 | Performance=
396 |   join( //join two datasets on _time, each dataset has a row for every minute from the start of range
397 |     tables: {good: ItemCountGoodData, on: RunningMins},
398 |     on: ["_time"]
399 |   )
400 |   |> map(fn: (r) => ({
401 |     _value: if r._value_on == 0.0 or float(v:$idealRunrate) == 0.0 then 0.0
402 |       else (r._value_good/r._value_on)/float(v:$idealRunrate), //Calculate KPI
403 |     _time: r._time, //set time
404 |     _value_on : r._value_on,
405 |     _value_good: r._value_good,
406 |     _field: "Performance" //set field/KPI name
407 |   }))
408 | ```
409 |
410 | The OEE is calculated from these base KPIs. Note that all KPIs are defined in a dataset containing `_time` and `_value`.
411 |
412 | Since Flux does not support joining more than two tables at once, a temporary dataset is used to join Availability and Quality first.
413 |
414 | ```
415 | AxQ=
416 |   join(
417 |     tables: {a:Availability, q: Quality},
418 |     on: ["_time"]
419 |   )
420 |   |> map(fn: (r) => ({
421 |     _value: r._value_a * r._value_q,
422 |     _time: r._time,
423 |     _field: "AxQ"
424 |   }))
425 | ```
426 |
427 | Then join the temporary dataset with Performance to calculate the OEE:
428 |
429 | ```
430 | OEE=
431 |   join(
432 |     tables: {axq:AxQ, p: Performance},
433 |     on: ["_time"]
434 |   )
435 |   |> map(fn: (r) => ({
436 |     _value: r._value_axq * r._value_p,
437 |     _time: r._time,
438 |     _field: "OEE"
439 |   }))
440 |   |> yield(name: "OEE") // use yield function to materialize dataset
441 | ```
442 |
443 | The OEE Gauge displays the value from the start to the end of the selected range. Therefore its Flux query contains `sum()` instead of `cumulativeSum()`, as the desired datasets should contain only one record rather than a record for every minute.
444 |
445 | Below is the Flux query for the OEE Gauge. The differences in the query are marked with `//****`.
446 | 447 | ``` 448 | import "math" 449 | import "csv" 450 | 451 | StatusValuesForOn = [101,105,108] 452 | 453 | fGetLastStatus = () => { 454 | 455 | dummyRecordCsv = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string\n#group,false,false,true,true,false,false,true,true,true\n#default,_result,,,,,,,,\n,result,table,_start,_stop,_time,_value,Source,_field,_measurement\n,,0,2030-01-01T00:00:00.0Z,2030-01-01T00:00:00.0Z,2030-01-01T00:00:00.0Z,0,,STATUS,DeviceData" 456 | 457 | dummyRecord=csv.from(csv: dummyRecordCsv ) 458 | 459 | firstStatusTimestampTemp= 460 | from(bucket: "telemetry") 461 | |> range([[range]]) 462 | |> filter(fn: (r) => r._measurement == "DeviceData" ) 463 | |> filter(fn: (r) => r.Source == "[[Source]]" ) 464 | |> filter(fn: (r) => r._field == "STATUS" ) 465 | |> first() 466 | 467 | firstStatusTimestamp= 468 | union(tables: [firstStatusTimestampTemp,dummyRecord]) 469 | |> tableFind(fn: (key) => key._field == "STATUS" ) 470 | |> getColumn(column: "_time") 471 | 472 | lastStatusBeforeRangeTemp= 473 | from(bucket: "telemetry") 474 | |> range(start:-1000d, 475 | stop: time(v:uint(v:firstStatusTimestamp[0])-uint(v:1))) 476 | |> filter(fn: (r) => r._measurement == "DeviceData" ) 477 | |> filter(fn: (r) => r.Source == "[[Source]]" ) 478 | |> map(fn: (r) => ({ 479 | _field: "STATUS", 480 | _time: r._time, 481 | _value: float(v: contains(value: int(v: r._value), 482 | set : StatusValuesForOn )) 483 | })) 484 | |> last() 485 | 486 | lastStatusBeforeRange= 487 | union(tables: [lastStatusBeforeRangeTemp,dummyRecord]) 488 | |> tableFind(fn: (key) => key._field == "STATUS" ) 489 | |> getColumn(column: "_value") 490 | 491 | return lastStatusBeforeRange[length(arr:lastStatusBeforeRange)-1] 492 | } 493 | 494 | DeviceData=from(bucket: "telemetry") 495 | |> range([[range]]) 496 | |> filter(fn: (r) => r._measurement == "DeviceData" ) 497 | |> filter(fn: (r) => r.Source == "[[Source]]" ) 498 | |> group() 499 | |> keep(columns: ["_time","_field","_value"]) 500 | 501 | ItemCountGoodData= 502 | DeviceData 503 | |> filter(fn: (r) => r._field == "ITEM_COUNT_GOOD" ) 504 | |> aggregateWindow(every: 1m, fn: sum) 505 | |> sum() //**** 506 | |> map(fn: (r) => ({ 507 | _value: r._value, 508 | _time: now() 509 | })) 510 | 511 | ItemCountBadData= 512 | DeviceData 513 | |> filter(fn: (r) => r._field == "ITEM_COUNT_BAD" ) 514 | |> aggregateWindow(every: 1m, fn: sum) 515 | |> sum() //**** 516 | |> map(fn: (r) => ({ 517 | _value: r._value, 518 | _time: now() 519 | })) 520 | 521 | StatusData= 522 | DeviceData 523 | |> filter(fn: (r) => r._field == "STATUS") 524 | |> map(fn: (r) => ({ 525 | _time: r._time, 526 | _value: float(v: contains(value: int(v: r._value), 527 | set: StatusValuesForOn )) 528 | })) 529 | 530 | StatusDataWindowed= 531 | StatusData 532 | |> aggregateWindow(every: 1m, fn: mean) 533 | |> fill(column: "_value", usePrevious: true) 534 | |> fill(column: "_value", value: fGetLastStatus()) 535 | |> map(fn: (r) => ({_time: r._time, _value: math.ceil(x: r._value)})) 536 | 537 | RunningMins= 538 | StatusDataWindowed 539 | |> sum() //**** 540 | |> map(fn: (r) => ({ 541 | _value: r._value, 542 | _time: now() 543 | })) 544 | 545 | IdleMins= 546 | StatusDataWindowed 547 | |> map(fn: (r) => ({ 548 | _value: 1.0 - r._value, 549 | _time: r._time, 550 | })) 551 | |> sum() //**** 552 | |> map(fn: (r) => ({ 553 | _value: r._value, 554 | _time: now() 555 | })) 556 | 557 | Availability= 558 | join( 559 | tables: {on: RunningMins, off: IdleMins}, 560 | on: ["_time"] 
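// join() matches rows on _time; in this gauge variant both inputs were
// reduced with sum() and re-stamped with now(), so each side contains a
// single row and the join therefore yields exactly one row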
561 |   )
562 |   |> map(fn: (r) => ({
563 |     _value: if r._value_on+r._value_off == 0.0 then 0.0
564 |       else r._value_on/(r._value_on+r._value_off),
565 |     _time: r._time,
566 |     _field: "Availability"
567 |   }))
568 |
569 | Quality=
570 |   join(
571 |     tables: {good: ItemCountGoodData, bad: ItemCountBadData},
572 |     on: ["_time"]
573 |   )
574 |   |> map(fn: (r) => ({
575 |     _value: if r._value_good+r._value_bad == 0.0 then 0.0
576 |       else r._value_good/(r._value_good+r._value_bad),
577 |     _time: r._time,
578 |     _field: "Quality"
579 |   }))
580 |
581 | Performance=
582 |   join(
583 |     tables: {good: ItemCountGoodData, on: RunningMins},
584 |     on: ["_time"]
585 |   )
586 |   |> map(fn: (r) => ({
587 |     _value: if r._value_on == 0.0 or float(v:$idealRunrate) == 0.0 then 0.0
588 |       else (r._value_good/r._value_on)/float(v:$idealRunrate),
589 |     _time: r._time,
590 |     _value_on : r._value_on, _value_good: r._value_good,
591 |     _field: "Performance"
592 |   }))
593 |
594 | AxQ=
595 |   join(
596 |     tables: {a:Availability, q: Quality},
597 |     on: ["_time"]
598 |   )
599 |   |> map(fn: (r) => ({
600 |     _value: r._value_a * r._value_q,
601 |     _time: r._time,
602 |     _field: "AxQ"
603 |   }))
604 |
605 | OEE=
606 |   join(
607 |     tables: {axq:AxQ, p: Performance},
608 |     on: ["_time"]
609 |   )
610 |   |> map(fn: (r) => ({
611 |     _value: r._value_axq * r._value_p,
612 |     _time: r._time,
613 |     _field: "OEE"
614 |   }))
615 |   |> yield(name: "OEE")
616 | ```
617 |
-------------------------------------------------------------------------------- /documentation/setup-edge-environment.md:
--------------------------------------------------------------------------------
1 | # Creating an Azure environment to develop and run IoT Edge services
2 |
3 | This document describes how to set up the required Azure services as well as an AMD64-Linux-based IoT Edge box to deploy the sample.
4 |
5 | **Table of contents**
6 | * [Install Azure CLI](#install-azure-cli)
7 | * [Install Azure IoT for deployment](#install-azure-iot-for-deployment)
8 | * [Create the required Azure and Edge services](#create-the-required-azure-and-edge-services)
9 |
10 | ## Install Azure CLI
11 |
12 | This sample leverages the [Azure Command Line Interface (CLI)](https://docs.microsoft.com/en-us/cli). To install the Azure CLI, follow the instructions [here](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) for the desired environment.
13 |
14 | Alternatively, use the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart?view=azure-cli-latest) in the Azure Portal, which has the CLI pre-installed. Follow the instructions until the "Create a resource group" section.
15 |
16 | ## Install Azure IoT for deployment
17 |
18 | The `azure-iot` extension is used to deploy the sample.
19 |
20 | Note that the legacy version was called `azure-cli-iot-ext`:
21 |
22 | * Use the command `az extension list` to validate the currently installed extensions before installing azure-iot.
23 | * Use `az extension remove --name azure-cli-iot-ext` to remove the legacy version of the extension.
24 |
25 | Use `az extension add --name azure-iot` to add the most recent version of the extension.
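Taken together, the extension check-and-replace sequence from the steps above looks like this:

```bash
# List the currently installed extensions
az extension list --output table

# Remove the legacy extension, if present
az extension remove --name azure-cli-iot-ext

# Add the most recent version of the azure-iot extension
az extension add --name azure-iot
```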
26 |
27 | ## Create the required Azure and Edge services
28 |
29 | Create a resource group to manage all the resources used in this solution:
30 |
31 | ```bash
32 | az group create --name {resource_group} --location {datacenter_location}
33 | ```
34 |
35 | ### Create an Azure Container Registry
36 |
37 | This sample is based on Docker images for each module, which are pushed to a Docker container registry.
38 |
39 | If not already available, set up an Azure Container Registry with these [instructions](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-azure-cli#create-a-container-registry). Once created, navigate to the "Access Keys" blade in the left navigation of the container registry settings and note down the username and password.
40 |
41 | ### Create an Azure IoT Hub
42 |
43 | Create a new IoT Hub resource:
44 |
45 | ```bash
46 | az iot hub create --resource-group {resource_group} --name {hub_name} --sku S1
47 | ```
48 |
49 | ### Create an IoT Edge device identity
50 |
51 | A device identity is required for each IoT Edge device so that it can communicate with the IoT Hub. The device identity lives in the cloud, and the device uses a unique device connection string to associate itself to its device identity.
52 |
53 | Create the device identity using the Azure CLI:
54 |
55 | ```bash
56 | az iot hub device-identity create --hub-name {hub_name} --device-id myEdgeDevice --edge-enabled
57 | ```
58 |
59 | Retrieve the connection string for the created device, which links the physical device with its identity in the IoT Hub.
60 |
61 | ```bash
62 | az iot hub device-identity show-connection-string --device-id myEdgeDevice --hub-name {hub_name}
63 | ```
64 |
65 | Copy the value of the `connectionString` from the JSON output and save it. The connection string is used to configure the IoT Edge runtime in the next section.
66 |
67 | ![Retrieve connection string from CLI output](../media/retrieve-connection-string.png)
68 |
69 | ### Create and configure an IoT Edge VM
70 |
71 | The sample can be run on a [physical IoT Edge device](https://catalog.azureiotsolutions.com/) or a virtual machine.
The following command will create an Ubuntu Linux VM, deploy Azure IoT Edge v1.2 and connect it to the IoT Hub instance/device given in the "deviceConnectionString" parameter.
72 |
73 | ```bash
74 | az deployment group create \
75 |   --name edgeVMDeployment \
76 |   --resource-group {resource_group} \
77 |   --template-uri "https://raw.githubusercontent.com/Azure/iotedge-vm-deploy/1.2.0/edgeDeploy.json" \
78 |   --parameters dnsLabelPrefix='myedgevm' \
79 |   --parameters adminUsername='azureuser' \
80 |   --parameters deviceConnectionString=$(az iot hub device-identity connection-string show --device-id myEdgeDevice --hub-name {hub_name} -o tsv) \
81 |   --parameters authenticationType='sshPublicKey' \
82 |   --parameters adminPasswordOrKey="$(< ~/.ssh/id_rsa.pub)"
83 | ```
84 |
85 | To use username/password-based authentication instead of SSH keys, use the following command:
86 | ```bash
87 | az deployment group create \
88 |   --name edgeVMDeployment \
89 |   --resource-group {resource_group} \
90 |   --template-uri "https://raw.githubusercontent.com/Azure/iotedge-vm-deploy/1.2.0/edgeDeploy.json" \
91 |   --parameters dnsLabelPrefix='myedgevm' \
92 |   --parameters adminUsername='azureuser' \
93 |   --parameters deviceConnectionString=$(az iot hub device-identity connection-string show --device-id myEdgeDevice --hub-name {hub_name} -o tsv) \
94 |   --parameters authenticationType='password' \
95 |   --parameters adminPasswordOrKey=""
96 | ```
97 |
98 | If successful, SSH into your device / VM using the 'azureuser' username and run:
99 |
100 | ```bash
101 | iotedge list
102 | ```
103 |
104 | This should list the edgeAgent module running, indicating a successful setup.
105 |
106 | ## Dashboard solution preparation
107 |
108 | The sample uses persistent storage for the InfluxDB database, which requires a directory for the module to bind to. SSH into the Edge device / VM and run the following commands:
109 |
110 | ```bash
111 | sudo mkdir /influxdata
112 | sudo chmod 777 -R /influxdata
113 | ```
114 |
115 | Next, open a port for the Grafana dashboards. The default Grafana port is 3000:
116 |
117 | > [!NOTE]
118 | > This is not required in a production environment using real devices, as any "offline" clients will probably be on the same network as the IoT Edge box. It is only required when using a VM in Azure. The rule is applied to the subnet because a subnet was already created above along with the Edge VM.
119 |
120 | ```bash
121 | az vm open-port --resource-group {resource group} --name {edge vm name} --apply-to-subnet --port 3000 --priority 200
122 | ```
123 |
124 | Finally, set the tag for dashboarding:
125 |
126 | ```bash
127 | az iot hub device-twin update --device-id myEdgeDevice --hub-name {hub_name} --set tags='{"dashboard": true}'
128 | ```
129 |
130 | You can now return to the [dashboarding sample](dashboarding-sample.md#deployment-of-the-sample) document to pick a deployment strategy.
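As an optional check before moving on, the dashboarding tag set above can be verified by querying the device twin (`az iot hub device-twin show` is part of the azure-iot extension installed earlier):

```bash
az iot hub device-twin show --device-id myEdgeDevice --hub-name {hub_name} --query tags
```

The output should include `{ "dashboard": true }`.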
131 | -------------------------------------------------------------------------------- /media/OEEgauge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/OEEgauge.png -------------------------------------------------------------------------------- /media/OfflineDashboards_diag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/OfflineDashboards_diag.png -------------------------------------------------------------------------------- /media/OfflineDashboards_diag0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/OfflineDashboards_diag0.png -------------------------------------------------------------------------------- /media/OfflineDashboards_diag1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/OfflineDashboards_diag1.png -------------------------------------------------------------------------------- /media/OfflineDashboards_diag2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/OfflineDashboards_diag2.png -------------------------------------------------------------------------------- /media/availabilitygauge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/availabilitygauge.png -------------------------------------------------------------------------------- /media/availabilitygraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/availabilitygraph.png -------------------------------------------------------------------------------- /media/dashboard-asset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/dashboard-asset.png -------------------------------------------------------------------------------- /media/dashboard-production.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/dashboard-production.png -------------------------------------------------------------------------------- /media/dashboard-sitelevel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/dashboard-sitelevel.png -------------------------------------------------------------------------------- /media/dashboard.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/dashboard.png -------------------------------------------------------------------------------- /media/dataflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/dataflow.png -------------------------------------------------------------------------------- /media/edge-modules.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/edge-modules.png -------------------------------------------------------------------------------- /media/edge-routes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/edge-routes.png -------------------------------------------------------------------------------- /media/edge-success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/edge-success.png -------------------------------------------------------------------------------- /media/grafana-dash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/grafana-dash.png -------------------------------------------------------------------------------- /media/idealrunrate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/idealrunrate.png -------------------------------------------------------------------------------- /media/nodered_sim1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/nodered_sim1.png -------------------------------------------------------------------------------- /media/nodered_sim2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/nodered_sim2.png -------------------------------------------------------------------------------- /media/oeegraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/oeegraph.png -------------------------------------------------------------------------------- /media/performancegauge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/performancegauge.png 
-------------------------------------------------------------------------------- /media/performancegraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/performancegraph.png -------------------------------------------------------------------------------- /media/qualitygauge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/qualitygauge.png -------------------------------------------------------------------------------- /media/qualitygraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/qualitygraph.png -------------------------------------------------------------------------------- /media/retrieve-connection-string.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/retrieve-connection-string.png -------------------------------------------------------------------------------- /media/sitecombo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/sitecombo.png -------------------------------------------------------------------------------- /media/timeinterval.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/timeinterval.png -------------------------------------------------------------------------------- /media/timeinterval2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/timeinterval2.png -------------------------------------------------------------------------------- /media/vscode-source-control.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AzureIoTGBB/iot-edge-offline-dashboarding/d32671ef9e4b7418e644ad72d28961b986e81f91/media/vscode-source-control.jpg -------------------------------------------------------------------------------- /modules/edgetoinfluxdb/Dockerfile: -------------------------------------------------------------------------------- 1 | # Start with upstream node-red 2 | FROM nodered/node-red 3 | 4 | # Install the additional Node-RED palette nodes the flows 5 | # below depend on (InfluxDB and Azure IoT Edge) 6 | RUN npm install node-red-contrib-influxdb 7 | RUN npm install node-red-contrib-azure-iot-edge-kpm 8 | 9 | # Copy Node-RED project files into place 10 | COPY settings.js /data/settings.js 11 | COPY flows_cred.json /data/flows_cred.json 12 | COPY flows.json /data/flows.json 13 | 14 | EXPOSE 1880/tcp 15 | 16 | # Start the container normally 17 | CMD ["npm", "start"] 18 | -------------------------------------------------------------------------------- /modules/edgetoinfluxdb/flows.json:
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "id": "a1cd98e4.e9ed18", 4 | "type": "tab", 5 | "label": "flowOPCToInflux", 6 | "disabled": false, 7 | "info": "" 8 | }, 9 | { 10 | "id": "90f6d0c.53b923", 11 | "type": "moduleclient", 12 | "z": "" 13 | }, 14 | { 15 | "id": "c0a47ecc.b1bd4", 16 | "type": "influxdb", 17 | "z": "", 18 | "hostname": "influxdb", 19 | "port": "8086", 20 | "protocol": "http", 21 | "database": "telemetry", 22 | "name": "", 23 | "usetls": false, 24 | "tls": "" 25 | }, 26 | { 27 | "id": "2d52392b.d6f636", 28 | "type": "moduleclient", 29 | "z": "" 30 | }, 31 | { 32 | "id": "d487b04.a1f095", 33 | "type": "influxdb", 34 | "z": "", 35 | "hostname": "influxdb", 36 | "port": "8086", 37 | "protocol": "http", 38 | "database": "telemetry", 39 | "name": "", 40 | "usetls": false, 41 | "tls": "" 42 | }, 43 | { 44 | "id": "22a2c45.5ed293c", 45 | "type": "moduleinput", 46 | "z": "a1cd98e4.e9ed18", 47 | "client": "2d52392b.d6f636", 48 | "input": "input1", 49 | "x": 150, 50 | "y": 240, 51 | "wires": [ 52 | [ 53 | "8afca10.928626", 54 | "7f142d2d.474494" 55 | ] 56 | ] 57 | }, 58 | { 59 | "id": "8afca10.928626", 60 | "type": "json", 61 | "z": "a1cd98e4.e9ed18", 62 | "name": "Covert to JSON", 63 | "property": "payload", 64 | "action": "obj", 65 | "pretty": false, 66 | "x": 380, 67 | "y": 240, 68 | "wires": [ 69 | [ 70 | "ed6d59de.312468" 71 | ] 72 | ] 73 | }, 74 | { 75 | "id": "6783c1a.f545e4", 76 | "type": "change", 77 | "z": "a1cd98e4.e9ed18", 78 | "name": "", 79 | "rules": [ 80 | { 81 | "t": "delete", 82 | "p": "payload.ContentMask", 83 | "pt": "msg" 84 | }, 85 | { 86 | "t": "delete", 87 | "p": "payload.Value", 88 | "pt": "msg" 89 | }, 90 | { 91 | "t": "move", 92 | "p": "payload.Value_", 93 | "pt": "msg", 94 | "to": "payload.Value", 95 | "tot": "msg" 96 | }, 97 | { 98 | "t": "delete", 99 | "p": "payload.StatusCode", 100 | "pt": "msg" 101 | }, 102 | { 103 | "t": "delete", 104 | "p": "payload.Status", 105 | "pt": "msg" 106 | }, 107 | { 108 | "t": "delete", 109 | "p": "payload.ApplicationUri", 110 | "pt": "msg" 111 | }, 112 | { 113 | "t": "delete", 114 | "p": "payload.Value_Type", 115 | "pt": "msg" 116 | } 117 | ], 118 | "action": "", 119 | "property": "", 120 | "from": "", 121 | "to": "", 122 | "reg": false, 123 | "x": 560, 124 | "y": 780, 125 | "wires": [ 126 | [] 127 | ] 128 | }, 129 | { 130 | "id": "41a2c16.97a0a4", 131 | "type": "influxdb batch", 132 | "z": "a1cd98e4.e9ed18", 133 | "influxdb": "d487b04.a1f095", 134 | "precision": "", 135 | "retentionPolicy": "", 136 | "name": "Write to Telemetry DB", 137 | "x": 780, 138 | "y": 240, 139 | "wires": [] 140 | }, 141 | { 142 | "id": "ed6d59de.312468", 143 | "type": "function", 144 | "z": "a1cd98e4.e9ed18", 145 | "name": "Build JSON", 146 | "func": "//type checking\nvar getType = function (elem) {\n return Object.prototype.toString.call(elem).slice(8, -1);\n};\n\nfunction appendLeadingZeroes(n,digits){\n var s=\"\";\n var start;\n if(n <= 9){\n start=1;\n }\n else if(n > 9 && n<= 99){\n start=2;\n }\n else if(n > 99){\n start=3;\n }\n \n for (i=start;i=0)\n{\n tmpStr = rnode.DisplayName.split(\"=\");\n rnode.DisplayName=tmpStr[tmpStr.length-1];\n}\n\nif (rnode.ApplicationUri === null || rnode.ApplicationUri === undefined || rnode.ApplicationUri === '')\n{\n tmpStr = rnode.NodeId.split(\"=\");\n if(tmpStr[0].length>2){rnode.ApplicationUri=tmpStr[0].substring(0,tmpStr[0].length-2);}\n else {rnode.ApplicationUri=tmpStr[0];}\n}\n\n//make sure timestamp property exists\nif (rnode.Timestamp === 
undefined){\n rnode.Timestamp = new Date().toString(); \n}\n\n\nrnode.time = new Date(rnode.Timestamp).getTime()*1000000;\n\nvar new_payload = \n {\n measurement: \"DeviceData\",\n fields: {\n //Value: rnode.Value\n },\n tags:{\n //NodeId: rnode.NodeId,\n //DataPoint: rnode.DisplayName,\n Source: rnode.ApplicationUri,\n //EventTime: formatDate(new Date(rnode.Timestamp))\n },\n timestamp: rnode.time\n }\n;\n\n//new_payload.measurement = rnode.DisplayName;\nnew_payload.fields[rnode.DisplayName]=rnode.Value;\nreturn new_payload;\n}\n\n\n\n//main\nif (getType(msg.payload) === 'Array'){\n for (index = 0; index < msg.payload.length; index++) { \n msg.payload[index] = processNode(msg.payload[index]); \n }\n} \nelse\n{\n var newnode = processNode(msg.payload);\n msg.payload = new Array(newnode);\n}\nreturn msg;\n\n", 147 | "outputs": 1, 148 | "noerr": 0, 149 | "x": 570, 150 | "y": 240, 151 | "wires": [ 152 | [ 153 | "41a2c16.97a0a4" 154 | ] 155 | ] 156 | }, 157 | { 158 | "id": "7f142d2d.474494", 159 | "type": "debug", 160 | "z": "a1cd98e4.e9ed18", 161 | "name": "", 162 | "active": false, 163 | "tosidebar": true, 164 | "console": false, 165 | "tostatus": false, 166 | "complete": "false", 167 | "x": 340, 168 | "y": 460, 169 | "wires": [] 170 | } 171 | ] -------------------------------------------------------------------------------- /modules/edgetoinfluxdb/flows_cred.json: -------------------------------------------------------------------------------- 1 | {"$":"581c0e28a0373b34539e1764726d15bcwME="} -------------------------------------------------------------------------------- /modules/edgetoinfluxdb/module.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-version": "0.0.1", 3 | "description": "edgetoinfluxdb", 4 | "image": { 5 | "repository": "${CONTAINER_REGISTRY_ADDRESS}/iot-edge-offline-dashboarding/edgetoinfluxdb", 6 | "tag": { 7 | "version": "${CONTAINER_VERSION_TAG}", 8 | "platforms": { 9 | "amd64": "./Dockerfile" 10 | } 11 | }, 12 | "buildOptions": [], 13 | "contextPath": "./" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /modules/edgetoinfluxdb/settings.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright JS Foundation and other contributors, http://js.foundation 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | **/ 16 | 17 | // The `https` setting requires the `fs` module. Uncomment the following 18 | // to make it available: 19 | //var fs = require("fs"); 20 | 21 | module.exports = { 22 | // the tcp port that the Node-RED web server is listening on 23 | uiPort: process.env.PORT || 1880, 24 | 25 | // By default, the Node-RED UI accepts connections on all IPv4 interfaces. 26 | // To listen on all IPv6 addresses, set uiHost to "::", 27 | // The following property can be used to listen on a specific interface. 
For 28 | // example, the following would only allow connections from the local machine. 29 | uiHost: "0.0.0.0", 30 | 31 | // Retry time in milliseconds for MQTT connections 32 | mqttReconnectTime: 15000, 33 | 34 | // Retry time in milliseconds for Serial port connections 35 | serialReconnectTime: 15000, 36 | 37 | // Retry time in milliseconds for TCP socket connections 38 | //socketReconnectTime: 10000, 39 | 40 | // Timeout in milliseconds for TCP server socket connections 41 | // defaults to no timeout 42 | //socketTimeout: 120000, 43 | 44 | // Maximum number of messages to wait in queue while attempting to connect to TCP socket 45 | // defaults to 1000 46 | //tcpMsgQueueSize: 2000, 47 | 48 | // Timeout in milliseconds for HTTP request connections 49 | // defaults to 120 seconds 50 | //httpRequestTimeout: 120000, 51 | 52 | // The maximum length, in characters, of any message sent to the debug sidebar tab 53 | debugMaxLength: 1000, 54 | 55 | // The maximum number of messages nodes will buffer internally as part of their 56 | // operation. This applies across a range of nodes that operate on message sequences. 57 | // defaults to no limit. A value of 0 also means no limit is applied. 58 | //nodeMessageBufferMaxLength: 0, 59 | 60 | // To disable the option for using local files for storing keys and certificates in the TLS configuration 61 | // node, set this to true 62 | //tlsConfigDisableLocalFiles: true, 63 | 64 | // Colourise the console output of the debug node 65 | //debugUseColors: true, 66 | 67 | // The file containing the flows. If not set, it defaults to flows_.json 68 | //flowFile: 'flows.json', 69 | 70 | // To enabled pretty-printing of the flow within the flow file, set the following 71 | // property to true: 72 | //flowFilePretty: true, 73 | 74 | // By default, credentials are encrypted in storage using a generated key. To 75 | // specify your own secret, set the following property. 76 | // If you want to disable encryption of credentials, set this property to false. 77 | // Note: once you set this property, do not change it - doing so will prevent 78 | // node-red from being able to decrypt your existing credentials and they will be 79 | // lost. 80 | credentialSecret: "azureiot", 81 | 82 | // By default, all user data is stored in a directory called `.node-red` under 83 | // the user's home directory. To use a different location, the following 84 | // property can be used 85 | //userDir: '/home/nol/.node-red/', 86 | 87 | // Node-RED scans the `nodes` directory in the userDir to find local node files. 88 | // The following property can be used to specify an additional directory to scan. 89 | //nodesDir: '/home/nol/.node-red/nodes', 90 | 91 | // By default, the Node-RED UI is available at http://localhost:1880/ 92 | // The following property can be used to specify a different root path. 93 | // If set to false, this is disabled. 94 | //httpAdminRoot: '/admin', 95 | 96 | // Some nodes, such as HTTP In, can be used to listen for incoming http requests. 97 | // By default, these are served relative to '/'. The following property 98 | // can be used to specifiy a different root path. If set to false, this is 99 | // disabled. 100 | //httpNodeRoot: '/red-nodes', 101 | 102 | // The following property can be used in place of 'httpAdminRoot' and 'httpNodeRoot', 103 | // to apply the same root to both parts. 
104 | //httpRoot: '/red', 105 | 106 | // When httpAdminRoot is used to move the UI to a different root path, the 107 | // following property can be used to identify a directory of static content 108 | // that should be served at http://localhost:1880/. 109 | //httpStatic: '/home/nol/node-red-static/', 110 | 111 | // The maximum size of HTTP request that will be accepted by the runtime api. 112 | // Default: 5mb 113 | //apiMaxLength: '5mb', 114 | 115 | // If you installed the optional node-red-dashboard you can set it's path 116 | // relative to httpRoot 117 | //ui: { path: "ui" }, 118 | 119 | // Securing Node-RED 120 | // ----------------- 121 | // To password protect the Node-RED editor and admin API, the following 122 | // property can be used. See http://nodered.org/docs/security.html for details. 123 | adminAuth: { 124 | type: "credentials", 125 | users: [{ 126 | username: "admin", 127 | password: "$2a$08$iiR32/SpJlZkZQ3MGEtd8OuC22n5qtvO/ms7gCdi8mUxz0zxqpccy", 128 | permissions: "*" 129 | }, 130 | { 131 | username: "reader", 132 | password: "$2a$08$V6.hG3YSkMPhCBEs0tcnGeHpo3eRkNDB3/ESgQ0ibAZy7BDjhwjG6", 133 | permissions: ["read","debug.write"] 134 | }] 135 | }, 136 | 137 | // To password protect the node-defined HTTP endpoints (httpNodeRoot), or 138 | // the static content (httpStatic), the following properties can be used. 139 | // The pass field is a bcrypt hash of the password. 140 | // See http://nodered.org/docs/security.html#generating-the-password-hash 141 | //httpNodeAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."}, 142 | //httpStaticAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."}, 143 | 144 | // The following property can be used to enable HTTPS 145 | // See http://nodejs.org/api/https.html#https_https_createserver_options_requestlistener 146 | // for details on its contents. 147 | // See the comment at the top of this file on how to load the `fs` module used by 148 | // this setting. 149 | // 150 | //https: { 151 | // key: fs.readFileSync('privatekey.pem'), 152 | // cert: fs.readFileSync('certificate.pem') 153 | //}, 154 | 155 | // The following property can be used to cause insecure HTTP connections to 156 | // be redirected to HTTPS. 157 | //requireHttps: true, 158 | 159 | // The following property can be used to disable the editor. The admin API 160 | // is not affected by this option. To disable both the editor and the admin 161 | // API, use either the httpRoot or httpAdminRoot properties 162 | //disableEditor: false, 163 | 164 | // The following property can be used to configure cross-origin resource sharing 165 | // in the HTTP nodes. 166 | // See https://github.com/troygoode/node-cors#configuration-options for 167 | // details on its contents. The following is a basic permissive set of options: 168 | //httpNodeCors: { 169 | // origin: "*", 170 | // methods: "GET,PUT,POST,DELETE" 171 | //}, 172 | 173 | // If you need to set an http proxy please set an environment variable 174 | // called http_proxy (or HTTP_PROXY) outside of Node-RED in the operating system. 175 | // For example - http_proxy=http://myproxy.com:8080 176 | // (Setting it here will have no effect) 177 | // You may also specify no_proxy (or NO_PROXY) to supply a comma separated 178 | // list of domains to not proxy, eg - no_proxy=.acme.co,.acme.co.uk 179 | 180 | // The following property can be used to add a custom middleware function 181 | // in front of all http in nodes. 
This allows custom authentication to be 182 | // applied to all http in nodes, or any other sort of common request processing. 183 | //httpNodeMiddleware: function(req,res,next) { 184 | // // Handle/reject the request, or pass it on to the http in node by calling next(); 185 | // // Optionally skip our rawBodyParser by setting this to true; 186 | // //req.skipRawBodyParser = true; 187 | // next(); 188 | //}, 189 | 190 | // The following property can be used to pass custom options to the Express.js 191 | // server used by Node-RED. For a full list of available options, refer 192 | // to http://expressjs.com/en/api.html#app.settings.table 193 | //httpServerOptions: { }, 194 | 195 | // The following property can be used to verify websocket connection attempts. 196 | // This allows, for example, the HTTP request headers to be checked to ensure 197 | // they include valid authentication information. 198 | //webSocketNodeVerifyClient: function(info) { 199 | // // 'info' has three properties: 200 | // // - origin : the value in the Origin header 201 | // // - req : the HTTP request 202 | // // - secure : true if req.connection.authorized or req.connection.encrypted is set 203 | // // 204 | // // The function should return true if the connection should be accepted, false otherwise. 205 | // // 206 | // // Alternatively, if this function is defined to accept a second argument, callback, 207 | // // it can be used to verify the client asynchronously. 208 | // // The callback takes three arguments: 209 | // // - result : boolean, whether to accept the connection or not 210 | // // - code : if result is false, the HTTP error status to return 211 | // // - reason: if result is false, the HTTP reason string to return 212 | //}, 213 | 214 | // The following property can be used to seed Global Context with predefined 215 | // values. This allows extra node modules to be made available with the 216 | // Function node. 217 | // For example, 218 | // functionGlobalContext: { os:require('os') } 219 | // can be accessed in a function block as: 220 | // global.get("os") 221 | functionGlobalContext: { 222 | // os:require('os'), 223 | // jfive:require("johnny-five"), 224 | // j5board:require("johnny-five").Board({repl:false}) 225 | }, 226 | // `global.keys()` returns a list of all properties set in global context. 227 | // This allows them to be displayed in the Context Sidebar within the editor. 228 | // In some circumstances it is not desirable to expose them to the editor. The 229 | // following property can be used to hide any property set in `functionGlobalContext` 230 | // from being list by `global.keys()`. 231 | // By default, the property is set to false to avoid accidental exposure of 232 | // their values. Setting this to true will cause the keys to be listed. 233 | exportGlobalContextKeys: false, 234 | 235 | 236 | // Context Storage 237 | // The following property can be used to enable context storage. The configuration 238 | // provided here will enable file-based context that flushes to disk every 30 seconds. 239 | // Refer to the documentation for further options: https://nodered.org/docs/api/context/ 240 | // 241 | //contextStorage: { 242 | // default: { 243 | // module:"localfilesystem" 244 | // }, 245 | //}, 246 | 247 | // The following property can be used to order the categories in the editor 248 | // palette. If a node's category is not in the list, the category will get 249 | // added to the end of the palette. 
250 | // If not set, the following default order is used: 251 | //paletteCategories: ['subflows','flow','input','output','function','parser','social','mobile','storage','analysis','advanced'], 252 | 253 | // Configure the logging output 254 | logging: { 255 | // Only console logging is currently supported 256 | console: { 257 | // Level of logging to be recorded. Options are: 258 | // fatal - only those errors which make the application unusable should be recorded 259 | // error - record errors which are deemed fatal for a particular request + fatal errors 260 | // warn - record problems which are non fatal + errors + fatal errors 261 | // info - record information about the general running of the application + warn + error + fatal errors 262 | // debug - record information which is more verbose than info + info + warn + error + fatal errors 263 | // trace - record very detailed logging + debug + info + warn + error + fatal errors 264 | // off - turn off all logging (doesn't affect metrics or audit) 265 | level: "info", 266 | // Whether or not to include metric events in the log output 267 | metrics: false, 268 | // Whether or not to include audit events in the log output 269 | audit: false 270 | } 271 | }, 272 | 273 | // Customising the editor 274 | editorTheme: { 275 | projects: { 276 | // To enable the Projects feature, set this value to true 277 | enabled: false 278 | } 279 | } 280 | } 281 | -------------------------------------------------------------------------------- /modules/grafana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM grafana/grafana:7.1.5 2 | 3 | RUN grafana-cli plugins install grafana-influxdb-flux-datasource 5.4.1 4 | 5 | COPY grafana-provisioning/ /etc/grafana/provisioning 6 | -------------------------------------------------------------------------------- /modules/grafana/azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | branches: 3 | include: 4 | - master 5 | paths: 6 | include: 7 | - modules/grafana/* 8 | - templates/azure-pipelines-module-build.yml 9 | 10 | pr: 11 | autoCancel: true 12 | 13 | jobs: 14 | - template: ../../templates/azure-pipelines-module-build.yml 15 | parameters: 16 | registryName: $(CONTAINER_REGISTRY_NAME) 17 | repositoryName: offline-dashboarding 18 | imageName: grafana 19 | variableGroupName: edge-deployment-settings 20 | workingDirectory: ./modules/grafana 21 | azureSubscription: $(azureSubscription) 22 | -------------------------------------------------------------------------------- /modules/grafana/grafana-provisioning/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | providers: 3 | - name: default 4 | folder: '' 5 | type: file 6 | disableDeletion: false 7 | editable: true 8 | options: 9 | path: /etc/grafana/provisioning/dashboards -------------------------------------------------------------------------------- /modules/grafana/grafana-provisioning/datasources/datasource.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | datasources: 3 | - name: myinfluxdb 4 | type: grafana-influxdb-flux-datasource 5 | database: telemetry 6 | url: http://influxdb:8086 7 | isDefault: true 8 | editable: true 9 | jsonData: 10 | bucket: "telemetry" -------------------------------------------------------------------------------- /modules/grafana/module.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "$schema-version": "0.0.1", 3 | "description": "grafana", 4 | "image": { 5 | "repository": "${CONTAINER_REGISTRY_ADDRESS}/iot-edge-offline-dashboarding/grafana", 6 | "tag": { 7 | "version": "${CONTAINER_VERSION_TAG}", 8 | "platforms": { 9 | "amd64": "./Dockerfile" 10 | } 11 | }, 12 | "buildOptions": [], 13 | "contextPath": "./" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /modules/influxdb/Dockerfile: -------------------------------------------------------------------------------- 1 | # start with standard public influx image 2 | FROM influxdb:1.8.2 3 | 4 | # copy in our database initialization script 5 | # creates telemetry database and sets retention policy 6 | COPY initdb.iql /docker-entrypoint-initdb.d/init.iql 7 | 8 | # copy in our config 9 | COPY influxdb.conf /etc/influxdb/influxdb.conf 10 | 11 | # launch influx daemon 12 | # TODO: probably don't need this 13 | CMD ["influxd"] 14 | -------------------------------------------------------------------------------- /modules/influxdb/azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | branches: 3 | include: 4 | - master 5 | paths: 6 | include: 7 | - modules/influxdb/* 8 | - templates/azure-pipelines-module-build.yml 9 | 10 | pr: 11 | autoCancel: true 12 | 13 | jobs: 14 | - template: ../../templates/azure-pipelines-module-build.yml 15 | parameters: 16 | registryName: $(CONTAINER_REGISTRY_NAME) 17 | repositoryName: offline-dashboarding 18 | imageName: influxdb 19 | variableGroupName: edge-deployment-settings 20 | workingDirectory: ./modules/influxdb 21 | azureSubscription: $(azureSubscription) 22 | -------------------------------------------------------------------------------- /modules/influxdb/influxdb.conf: -------------------------------------------------------------------------------- 1 | reporting-disabled = false 2 | bind-address = "127.0.0.1:8088" 3 | 4 | [meta] 5 | dir = "/var/lib/influxdb/meta" 6 | retention-autocreate = true 7 | logging-enabled = true 8 | 9 | [data] 10 | dir = "/var/lib/influxdb/data" 11 | index-version = "inmem" 12 | wal-dir = "/var/lib/influxdb/wal" 13 | wal-fsync-delay = "0s" 14 | validate-keys = false 15 | query-log-enabled = true 16 | cache-max-memory-size = 1073741824 17 | cache-snapshot-memory-size = 26214400 18 | cache-snapshot-write-cold-duration = "10m0s" 19 | compact-full-write-cold-duration = "4h0m0s" 20 | compact-throughput = 50331648 21 | compact-throughput-burst = 50331648 22 | max-series-per-database = 1000000 23 | max-values-per-tag = 100000 24 | max-concurrent-compactions = 0 25 | max-index-log-file-size = 1048576 26 | series-id-set-cache-size = 100 27 | series-file-max-concurrent-snapshot-compactions = 0 28 | trace-logging-enabled = false 29 | tsm-use-madv-willneed = false 30 | 31 | [coordinator] 32 | write-timeout = "10s" 33 | max-concurrent-queries = 0 34 | query-timeout = "0s" 35 | log-queries-after = "0s" 36 | max-select-point = 0 37 | max-select-series = 0 38 | max-select-buckets = 0 39 | 40 | [retention] 41 | enabled = true 42 | check-interval = "60m0s" 43 | 44 | [shard-precreation] 45 | enabled = true 46 | check-interval = "10m0s" 47 | advance-period = "30m0s" 48 | 49 | [monitor] 50 | store-enabled = true 51 | store-database = "_internal" 52 | store-interval = "10s" 53 | 54 | [subscriber] 55 | enabled = true 56 | http-timeout = "30s" 57 | insecure-skip-verify = false 58 | 
ca-certs = "" 59 | write-concurrency = 40 60 | write-buffer-size = 1000 61 | 62 | [http] 63 | enabled = true 64 | bind-address = ":8086" 65 | auth-enabled = false 66 | log-enabled = true 67 | suppress-write-log = false 68 | write-tracing = false 69 | flux-enabled = true 70 | flux-log-enabled = false 71 | pprof-enabled = true 72 | pprof-auth-enabled = false 73 | debug-pprof-enabled = false 74 | ping-auth-enabled = false 75 | https-enabled = false 76 | https-certificate = "/etc/ssl/influxdb.pem" 77 | https-private-key = "" 78 | max-row-limit = 0 79 | max-connection-limit = 0 80 | shared-secret = "" 81 | realm = "InfluxDB" 82 | unix-socket-enabled = false 83 | unix-socket-permissions = "0777" 84 | bind-socket = "/var/run/influxdb.sock" 85 | max-body-size = 25000000 86 | access-log-path = "" 87 | max-concurrent-write-limit = 0 88 | max-enqueued-write-limit = 0 89 | enqueued-write-timeout = 30000000000 90 | 91 | [logging] 92 | format = "auto" 93 | level = "info" 94 | suppress-logo = false 95 | 96 | [[graphite]] 97 | enabled = false 98 | bind-address = ":2003" 99 | database = "graphite" 100 | retention-policy = "" 101 | protocol = "tcp" 102 | batch-size = 5000 103 | batch-pending = 10 104 | batch-timeout = "1s" 105 | consistency-level = "one" 106 | separator = "." 107 | udp-read-buffer = 0 108 | 109 | [[collectd]] 110 | enabled = false 111 | bind-address = ":25826" 112 | database = "collectd" 113 | retention-policy = "" 114 | batch-size = 5000 115 | batch-pending = 10 116 | batch-timeout = "10s" 117 | read-buffer = 0 118 | typesdb = "/usr/share/collectd/types.db" 119 | security-level = "none" 120 | auth-file = "/etc/collectd/auth_file" 121 | parse-multivalue-plugin = "split" 122 | 123 | [[opentsdb]] 124 | enabled = false 125 | bind-address = ":4242" 126 | database = "opentsdb" 127 | retention-policy = "" 128 | consistency-level = "one" 129 | tls-enabled = false 130 | certificate = "/etc/ssl/influxdb.pem" 131 | batch-size = 1000 132 | batch-pending = 5 133 | batch-timeout = "1s" 134 | log-point-errors = true 135 | 136 | [[udp]] 137 | enabled = false 138 | bind-address = ":8089" 139 | database = "udp" 140 | retention-policy = "" 141 | batch-size = 5000 142 | batch-pending = 10 143 | read-buffer = 0 144 | batch-timeout = "1s" 145 | precision = "" 146 | 147 | [continuous_queries] 148 | log-enabled = true 149 | enabled = true 150 | query-stats-enabled = false 151 | run-interval = "1s" 152 | 153 | [tls] 154 | min-version = "" 155 | max-version = "" 156 | 157 | -------------------------------------------------------------------------------- /modules/influxdb/initdb.iql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE telemetry 2 | CREATE RETENTION POLICY rp1day ON telemetry DURATION 1d REPLICATION 1 DEFAULT 3 | -------------------------------------------------------------------------------- /modules/influxdb/module.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-version": "0.0.1", 3 | "description": "influxdb", 4 | "image": { 5 | "repository": "${CONTAINER_REGISTRY_ADDRESS}/iot-edge-offline-dashboarding/influxdb", 6 | "tag": { 7 | "version": "${CONTAINER_VERSION_TAG}", 8 | "platforms": { 9 | "amd64": "./Dockerfile" 10 | } 11 | }, 12 | "buildOptions": [], 13 | "contextPath": "./" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /modules/opcpublisher/Dockerfile: -------------------------------------------------------------------------------- 1 | 
# Pin the OPC Publisher version ('latest' breaks us) 2 | FROM mcr.microsoft.com/iotedge/opc-publisher:2.6.96 3 | 4 | COPY publishedNodes.json /app/pn.json -------------------------------------------------------------------------------- /modules/opcpublisher/module.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-version": "0.0.1", 3 | "description": "opcpublisher", 4 | "image": { 5 | "repository": "${CONTAINER_REGISTRY_ADDRESS}/iot-edge-offline-dashboarding/opcpublisher", 6 | "tag": { 7 | "version": "${CONTAINER_VERSION_TAG}", 8 | "platforms": { 9 | "amd64": "./Dockerfile" 10 | } 11 | }, 12 | "buildOptions": [], 13 | "contextPath": "./" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /modules/opcpublisher/publishedNodes.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "EndpointUrl": "opc.tcp://opcsimulator:54845/OPCUA/Site1", 4 | "UseSecurity": false, 5 | "OpcNodes": [ 6 | { 7 | "Id": "ns=1;s=STATUS", 8 | "OpcSamplingInterval": 1000, 9 | "OpcPublishingInterval": 5000, 10 | "DisplayName": "STATUS" 11 | }, 12 | { 13 | "Id": "ns=1;s=ITEM_COUNT_GOOD", 14 | "OpcSamplingInterval": 1000, 15 | "OpcPublishingInterval": 5000, 16 | "DisplayName": "ITEM_COUNT_GOOD" 17 | }, 18 | { 19 | "Id": "ns=1;s=ITEM_COUNT_BAD", 20 | "OpcSamplingInterval": 1000, 21 | "OpcPublishingInterval": 5000, 22 | "DisplayName": "ITEM_COUNT_BAD" 23 | } 24 | ] 25 | }, 26 | { 27 | "EndpointUrl": "opc.tcp://opcsimulator:54855/OPCUA/Site2", 28 | "UseSecurity": false, 29 | "OpcNodes": [ 30 | { 31 | "Id": "ns=1;s=STATUS", 32 | "OpcSamplingInterval": 1000, 33 | "OpcPublishingInterval": 5000, 34 | "DisplayName": "STATUS" 35 | }, 36 | { 37 | "Id": "ns=1;s=ITEM_COUNT_GOOD", 38 | "OpcSamplingInterval": 1000, 39 | "OpcPublishingInterval": 5000, 40 | "DisplayName": "ITEM_COUNT_GOOD" 41 | }, 42 | { 43 | "Id": "ns=1;s=ITEM_COUNT_BAD", 44 | "OpcSamplingInterval": 1000, 45 | "OpcPublishingInterval": 5000, 46 | "DisplayName": "ITEM_COUNT_BAD" 47 | } 48 | ] 49 | } 50 | ] 51 | -------------------------------------------------------------------------------- /modules/opcsimulator/Dockerfile: -------------------------------------------------------------------------------- 1 | # Start with upstream node-red 2 | FROM nodered/node-red 3 | 4 | # Install the additional Node-RED palette node the flow 5 | # below depends on (the OPC UA server) 6 | RUN npm install node-red-contrib-opcua-server 7 | 8 | # Copy Node-RED project files into place 9 | COPY settings.js /data/settings.js 10 | COPY flows_cred.json /data/flows_cred.json 11 | COPY flows.json /data/flows.json 12 | 13 | EXPOSE 1880/tcp 14 | 15 | # Start the container normally 16 | CMD ["npm", "start"] 17 | -------------------------------------------------------------------------------- /modules/opcsimulator/flows.json: -------------------------------------------------------------------------------- 1 | [{"id":"63d89a00.43c598","type":"tab","label":"OPC UA Server (with URI) 1","disabled":false,"info":""},{"id":"f3f78d16.4c4c1","type":"tab","label":"OPC UA Server (with URI) 2","disabled":false,"info":""},{"id":"1940f427.c54a8c","type":"opcua-compact-server","z":"63d89a00.43c598","port":54845,"endpoint":"OPCUA/Site1","productUri":"OPC-Site-01","acceptExternalCommands":true,"maxAllowedSessionNumber":"10","maxConnectionsPerEndpoint":"10","maxAllowedSubscriptionNumber":"100","alternateHostname":"","name":"OPC Server 
@54845","showStatusActivities":false,"showErrors":true,"allowAnonymous":true,"individualCerts":false,"isAuditing":false,"serverDiscovery":true,"users":[],"xmlsetsOPCUA":[],"publicCertificateFile":"","privateCertificateFile":"","registerServerMethod":"1","discoveryServerEndpointUrl":"","capabilitiesForMDNS":"","maxNodesPerRead":1000,"maxNodesPerWrite":1000,"maxNodesPerHistoryReadData":100,"maxNodesPerBrowse":3000,"maxBrowseContinuationPoints":"10","maxHistoryContinuationPoints":"10","delayToInit":"1000","delayToClose":"200","serverShutdownTimeout":"100","addressSpaceScript":"function constructAlarmAddressSpace(server, addressSpace, eventObjects, done) {\n // server = the created node-opcua server\n // addressSpace = script placeholder\n // eventObjects = to hold event variables in memory from this script\n \n // internal global sandbox objects are \n // node = node of the flex server, \n // coreServer = core iiot server object for debug and access to nodeOPCUA,\n // and scriptObjects to hold variables and functions\n\n const opcua = coreServer.choreCompact.opcua;\n const LocalizedText = opcua.LocalizedText;\n const namespace = addressSpace.getOwnNamespace();\n \n const Variant = opcua.Variant;\n const DataType = opcua.DataType;\n const DataValue = opcua.DataValue;\n\n var flexServerInternals = this;\n \n this.sandboxFlowContext.set(\"status\", 0.0);\n this.sandboxFlowContext.set(\"item_count_good\", 0.0);\n this.sandboxFlowContext.set(\"item_count_bad\", 0.0);\n this.sandboxFlowContext.set(\"prev_status_timestamp\", 0.0);\n\n\n const rootFolder = addressSpace.findNode(\"RootFolder\");\n \n const myDevice = namespace.addFolder(rootFolder.objects, {\n \"browseName\": \"Simulation\"\n });\n\n const varStatus = namespace.addVariable({\n \"organizedBy\": myDevice,\n \"browseName\": \"STATUS\",\n \"nodeId\": \"ns=1;s=STATUS\",\n \"dataType\": \"Double\",\n \"value\": {\n \"get\": function() {\n return new Variant({\n \"dataType\": DataType.Double,\n \"value\": flexServerInternals.sandboxFlowContext.get(\"status\")\n });\n },\n \"set\": function(variant) {\n flexServerInternals.sandboxFlowContext.set(\n \"status\",\n parseFloat(variant.value)\n );\n return opcua.StatusCodes.Good;\n }\n }\n });\n\n const varItemCountGood = namespace.addVariable({\n \"organizedBy\": myDevice,\n \"browseName\": \"ITEM_COUNT_GOOD\",\n \"nodeId\": \"ns=1;s=ITEM_COUNT_GOOD\",\n \"dataType\": \"Double\",\n \"value\": {\n \"get\": function() {\n return new Variant({\n \"dataType\": DataType.Double,\n \"value\": flexServerInternals.sandboxFlowContext.get(\"item_count_good\")\n });\n },\n \"set\": function(variant) {\n flexServerInternals.sandboxFlowContext.set(\n \"item_count_good\",\n parseFloat(variant.value)\n );\n return opcua.StatusCodes.Good;\n }\n }\n });\n \n const varItemCountBad = namespace.addVariable({\n \"organizedBy\": myDevice,\n \"browseName\": \"ITEM_COUNT_BAD\",\n \"nodeId\": \"ns=1;s=ITEM_COUNT_BAD\",\n \"dataType\": \"Double\",\n \"value\": {\n \"get\": function() {\n return new Variant({\n \"dataType\": DataType.Double,\n \"value\": flexServerInternals.sandboxFlowContext.get(\"item_count_bad\")\n });\n },\n \"set\": function(variant) {\n flexServerInternals.sandboxFlowContext.set(\n \"item_count_bad\",\n parseFloat(variant.value)\n );\n return opcua.StatusCodes.Good;\n }\n }\n });\n \n 
done();\n}\n","x":540,"y":240,"wires":[]},{"id":"7c2c600d.ec34d","type":"inject","z":"63d89a00.43c598","name":"","topic":"","payload":"","payloadType":"date","repeat":"5","crontab":"","once":true,"onceDelay":"0.5","x":230,"y":120,"wires":[["d45c3f62.8cd78"]]},{"id":"d45c3f62.8cd78","type":"function","z":"63d89a00.43c598","name":"Generate values","func":"var prevStatus = flow.get('status');\nvar prevStatusTimestamp = flow.get('prev_status_timestamp');\nvar curStatus;\nvar isOnOrOff;\nvar itemCountGood;\nvar itemCountBad;\nconst ChangeInterval = 600000;\nconst StatusValuesForOn = [101.0,105.0,108.0];\nconst StatusValuesForOff = [102.0,104.0,106.0,107.0,109.0];\n\nif(prevStatus===0)\n{\n curStatus = StatusValuesForOn[parseInt((Math.random() * StatusValuesForOn.length-1), 10)];\n prevStatusTimestamp=Date.now();\n}\nelse if (Date.now()-prevStatusTimestamp>ChangeInterval)\n{\n isOnOrOff = (Math.random()<0.9);\n if (isOnOrOff)\n curStatus = StatusValuesForOn[parseInt((Math.random() * StatusValuesForOn.length-1), 10)];\n else\n curStatus = StatusValuesForOff[parseInt((Math.random() * StatusValuesForOff.length-1), 10)];\n\n prevStatusTimestamp=Date.now();\n}\nelse\n curStatus = prevStatus;\n\n\nisOnOrOff = StatusValuesForOn.includes(curStatus);\n\nif (isOnOrOff)\n{\n itemCountGood = 80.0+Math.round(Math.random()*41);\n itemCountBad = parseFloat(Math.round(Math.random()*11));\n}\nelse\n{\n itemCountGood = 0.0;\n itemCountBad = 0.0;\n}\n\nflow.set('prev_status_timestamp', prevStatusTimestamp);\nflow.set('status', curStatus) ;\nflow.set('item_count_good', itemCountGood);\nflow.set('item_count_bad', itemCountBad);\n\nmsg.payload = [\n flow.get('status'),\n flow.get('item_count_good'),\n flow.get('item_count_bad')\n]\nreturn msg;","outputs":1,"noerr":0,"x":450,"y":120,"wires":[["13553d77.3716e3"]]},{"id":"13553d77.3716e3","type":"debug","z":"63d89a00.43c598","name":"","active":false,"tosidebar":true,"console":false,"tostatus":false,"complete":"false","x":720,"y":120,"wires":[]},{"id":"f62394f8.ba3358","type":"opcua-compact-server","z":"f3f78d16.4c4c1","port":"54855","endpoint":"OPCUA/Site2","productUri":"OPC-Site-02","acceptExternalCommands":true,"maxAllowedSessionNumber":"10","maxConnectionsPerEndpoint":"10","maxAllowedSubscriptionNumber":"100","alternateHostname":"","name":"OPC Server @54855","showStatusActivities":false,"showErrors":true,"allowAnonymous":true,"individualCerts":false,"isAuditing":false,"serverDiscovery":true,"users":[],"xmlsetsOPCUA":[],"publicCertificateFile":"","privateCertificateFile":"","registerServerMethod":"1","discoveryServerEndpointUrl":"","capabilitiesForMDNS":"","maxNodesPerRead":1000,"maxNodesPerWrite":1000,"maxNodesPerHistoryReadData":100,"maxNodesPerBrowse":3000,"maxBrowseContinuationPoints":"10","maxHistoryContinuationPoints":"10","delayToInit":"1000","delayToClose":"200","serverShutdownTimeout":"100","addressSpaceScript":"function constructAlarmAddressSpace(server, addressSpace, eventObjects, done) {\n // server = the created node-opcua server\n // addressSpace = script placeholder\n // eventObjects = to hold event variables in memory from this script\n \n // internal global sandbox objects are \n // node = node of the flex server, \n // coreServer = core iiot server object for debug and access to nodeOPCUA,\n // and scriptObjects to hold variables and functions\n\n const opcua = coreServer.choreCompact.opcua;\n const LocalizedText = opcua.LocalizedText;\n const namespace = addressSpace.getOwnNamespace();\n \n const Variant = opcua.Variant;\n const DataType = 
opcua.DataType;\n const DataValue = opcua.DataValue;\n\n var flexServerInternals = this;\n \n this.sandboxFlowContext.set(\"status\", 0.0);\n this.sandboxFlowContext.set(\"item_count_good\", 0.0);\n this.sandboxFlowContext.set(\"item_count_bad\", 0.0);\n this.sandboxFlowContext.set(\"prev_status_timestamp\", 0.0);\n\n\n const rootFolder = addressSpace.findNode(\"RootFolder\");\n \n const myDevice = namespace.addFolder(rootFolder.objects, {\n \"browseName\": \"Simulation\"\n });\n\n const varStatus = namespace.addVariable({\n \"organizedBy\": myDevice,\n \"browseName\": \"STATUS\",\n \"nodeId\": \"ns=1;s=STATUS\",\n \"dataType\": \"Double\",\n \"value\": {\n \"get\": function() {\n return new Variant({\n \"dataType\": DataType.Double,\n \"value\": flexServerInternals.sandboxFlowContext.get(\"status\")\n });\n },\n \"set\": function(variant) {\n flexServerInternals.sandboxFlowContext.set(\n \"status\",\n parseFloat(variant.value)\n );\n return opcua.StatusCodes.Good;\n }\n }\n });\n\n const varItemCountGood = namespace.addVariable({\n \"organizedBy\": myDevice,\n \"browseName\": \"ITEM_COUNT_GOOD\",\n \"nodeId\": \"ns=1;s=ITEM_COUNT_GOOD\",\n \"dataType\": \"Double\",\n \"value\": {\n \"get\": function() {\n return new Variant({\n \"dataType\": DataType.Double,\n \"value\": flexServerInternals.sandboxFlowContext.get(\"item_count_good\")\n });\n },\n \"set\": function(variant) {\n flexServerInternals.sandboxFlowContext.set(\n \"item_count_good\",\n parseFloat(variant.value)\n );\n return opcua.StatusCodes.Good;\n }\n }\n });\n \n const varItemCountBad = namespace.addVariable({\n \"organizedBy\": myDevice,\n \"browseName\": \"ITEM_COUNT_BAD\",\n \"nodeId\": \"ns=1;s=ITEM_COUNT_BAD\",\n \"dataType\": \"Double\",\n \"value\": {\n \"get\": function() {\n return new Variant({\n \"dataType\": DataType.Double,\n \"value\": flexServerInternals.sandboxFlowContext.get(\"item_count_bad\")\n });\n },\n \"set\": function(variant) {\n flexServerInternals.sandboxFlowContext.set(\n \"item_count_bad\",\n parseFloat(variant.value)\n );\n return opcua.StatusCodes.Good;\n }\n }\n });\n \n done();\n}\n","x":540,"y":240,"wires":[]},{"id":"fe4c51b8.7f676","type":"inject","z":"f3f78d16.4c4c1","name":"","topic":"","payload":"","payloadType":"date","repeat":"5","crontab":"","once":true,"onceDelay":"0.5","x":230,"y":120,"wires":[["877a1383.c9dae"]]},{"id":"877a1383.c9dae","type":"function","z":"f3f78d16.4c4c1","name":"Generate values","func":"var prevStatus = flow.get('status');\nvar prevStatusTimestamp = flow.get('prev_status_timestamp');\nvar curStatus;\nvar isOnOrOff;\nvar itemCountGood;\nvar itemCountBad;\nconst ChangeInterval = 600000;\nconst StatusValuesForOn = [101.0,105.0,108.0];\nconst StatusValuesForOff = [102.0,104.0,106.0,107.0,109.0];\n\nif(prevStatus===0)\n{\n curStatus = StatusValuesForOn[parseInt((Math.random() * StatusValuesForOn.length-1), 10)];\n prevStatusTimestamp=Date.now();\n}\nelse if (Date.now()-prevStatusTimestamp>ChangeInterval)\n{\n isOnOrOff = (Math.random()<0.9);\n if (isOnOrOff)\n curStatus = StatusValuesForOn[parseInt((Math.random() * StatusValuesForOn.length-1), 10)];\n else\n curStatus = StatusValuesForOff[parseInt((Math.random() * StatusValuesForOff.length-1), 10)];\n\n prevStatusTimestamp=Date.now();\n}\nelse\n curStatus = prevStatus;\n\nisOnOrOff = StatusValuesForOn.includes(curStatus);\n\nif (isOnOrOff)\n{\n itemCountGood = 80.0+Math.round(Math.random()*41);\n itemCountBad = parseFloat(Math.round(Math.random()*11));\n}\nelse\n{\n itemCountGood = 0.0;\n itemCountBad = 
0.0;\n}\n\nflow.set('prev_status_timestamp', prevStatusTimestamp);\nflow.set('status', curStatus) ;\nflow.set('item_count_good', itemCountGood);\nflow.set('item_count_bad', itemCountBad);\n\nmsg.payload = [\n flow.get('status'),\n flow.get('item_count_good'),\n flow.get('item_count_bad')\n]\nreturn msg;","outputs":1,"noerr":0,"x":450,"y":120,"wires":[["ea0aaf26.8f989"]]},{"id":"ea0aaf26.8f989","type":"debug","z":"f3f78d16.4c4c1","name":"","active":false,"tosidebar":true,"console":false,"tostatus":false,"complete":"false","x":720,"y":120,"wires":[]}] -------------------------------------------------------------------------------- /modules/opcsimulator/flows_cred.json: -------------------------------------------------------------------------------- 1 | {"$":"581c0e28a0373b34539e1764726d15bcwME="} -------------------------------------------------------------------------------- /modules/opcsimulator/module.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema-version": "0.0.1", 3 | "description": "opcsimulator", 4 | "image": { 5 | "repository": "${CONTAINER_REGISTRY_ADDRESS}/iot-edge-offline-dashboarding/opcsimulator", 6 | "tag": { 7 | "version": "${CONTAINER_VERSION_TAG}", 8 | "platforms": { 9 | "amd64": "./Dockerfile" 10 | } 11 | }, 12 | "buildOptions": [], 13 | "contextPath": "./" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /modules/opcsimulator/settings.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright JS Foundation and other contributors, http://js.foundation 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | **/ 16 | 17 | // The `https` setting requires the `fs` module. Uncomment the following 18 | // to make it available: 19 | //var fs = require("fs"); 20 | 21 | module.exports = { 22 | // the tcp port that the Node-RED web server is listening on 23 | uiPort: process.env.PORT || 1880, 24 | 25 | // By default, the Node-RED UI accepts connections on all IPv4 interfaces. 26 | // To listen on all IPv6 addresses, set uiHost to "::", 27 | // The following property can be used to listen on a specific interface. For 28 | // example, the following would only allow connections from the local machine. 
29 | uiHost: "0.0.0.0", 30 | 31 | // Retry time in milliseconds for MQTT connections 32 | mqttReconnectTime: 15000, 33 | 34 | // Retry time in milliseconds for Serial port connections 35 | serialReconnectTime: 15000, 36 | 37 | // Retry time in milliseconds for TCP socket connections 38 | //socketReconnectTime: 10000, 39 | 40 | // Timeout in milliseconds for TCP server socket connections 41 | // defaults to no timeout 42 | //socketTimeout: 120000, 43 | 44 | // Maximum number of messages to wait in queue while attempting to connect to TCP socket 45 | // defaults to 1000 46 | //tcpMsgQueueSize: 2000, 47 | 48 | // Timeout in milliseconds for HTTP request connections 49 | // defaults to 120 seconds 50 | //httpRequestTimeout: 120000, 51 | 52 | // The maximum length, in characters, of any message sent to the debug sidebar tab 53 | debugMaxLength: 1000, 54 | 55 | // The maximum number of messages nodes will buffer internally as part of their 56 | // operation. This applies across a range of nodes that operate on message sequences. 57 | // defaults to no limit. A value of 0 also means no limit is applied. 58 | //nodeMessageBufferMaxLength: 0, 59 | 60 | // To disable the option for using local files for storing keys and certificates in the TLS configuration 61 | // node, set this to true 62 | //tlsConfigDisableLocalFiles: true, 63 | 64 | // Colourise the console output of the debug node 65 | //debugUseColors: true, 66 | 67 | // The file containing the flows. If not set, it defaults to flows_.json 68 | //flowFile: 'flows.json', 69 | 70 | // To enabled pretty-printing of the flow within the flow file, set the following 71 | // property to true: 72 | //flowFilePretty: true, 73 | 74 | // By default, credentials are encrypted in storage using a generated key. To 75 | // specify your own secret, set the following property. 76 | // If you want to disable encryption of credentials, set this property to false. 77 | // Note: once you set this property, do not change it - doing so will prevent 78 | // node-red from being able to decrypt your existing credentials and they will be 79 | // lost. 80 | credentialSecret: "azureiot", 81 | 82 | // By default, all user data is stored in a directory called `.node-red` under 83 | // the user's home directory. To use a different location, the following 84 | // property can be used 85 | //userDir: '/home/nol/.node-red/', 86 | 87 | // Node-RED scans the `nodes` directory in the userDir to find local node files. 88 | // The following property can be used to specify an additional directory to scan. 89 | //nodesDir: '/home/nol/.node-red/nodes', 90 | 91 | // By default, the Node-RED UI is available at http://localhost:1880/ 92 | // The following property can be used to specify a different root path. 93 | // If set to false, this is disabled. 94 | //httpAdminRoot: '/admin', 95 | 96 | // Some nodes, such as HTTP In, can be used to listen for incoming http requests. 97 | // By default, these are served relative to '/'. The following property 98 | // can be used to specifiy a different root path. If set to false, this is 99 | // disabled. 100 | //httpNodeRoot: '/red-nodes', 101 | 102 | // The following property can be used in place of 'httpAdminRoot' and 'httpNodeRoot', 103 | // to apply the same root to both parts. 104 | //httpRoot: '/red', 105 | 106 | // When httpAdminRoot is used to move the UI to a different root path, the 107 | // following property can be used to identify a directory of static content 108 | // that should be served at http://localhost:1880/. 
109 | //httpStatic: '/home/nol/node-red-static/', 110 | 111 | // The maximum size of HTTP request that will be accepted by the runtime api. 112 | // Default: 5mb 113 | //apiMaxLength: '5mb', 114 | 115 | // If you installed the optional node-red-dashboard you can set it's path 116 | // relative to httpRoot 117 | //ui: { path: "ui" }, 118 | 119 | // Securing Node-RED 120 | // ----------------- 121 | // To password protect the Node-RED editor and admin API, the following 122 | // property can be used. See http://nodered.org/docs/security.html for details. 123 | adminAuth: { 124 | type: "credentials", 125 | users: [{ 126 | username: "admin", 127 | password: "$2a$08$iiR32/SpJlZkZQ3MGEtd8OuC22n5qtvO/ms7gCdi8mUxz0zxqpccy", 128 | permissions: "*" 129 | }, 130 | { 131 | username: "reader", 132 | password: "$2a$08$V6.hG3YSkMPhCBEs0tcnGeHpo3eRkNDB3/ESgQ0ibAZy7BDjhwjG6", 133 | permissions: ["read","debug.write"] 134 | }] 135 | }, 136 | 137 | // To password protect the node-defined HTTP endpoints (httpNodeRoot), or 138 | // the static content (httpStatic), the following properties can be used. 139 | // The pass field is a bcrypt hash of the password. 140 | // See http://nodered.org/docs/security.html#generating-the-password-hash 141 | //httpNodeAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."}, 142 | //httpStaticAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."}, 143 | 144 | // The following property can be used to enable HTTPS 145 | // See http://nodejs.org/api/https.html#https_https_createserver_options_requestlistener 146 | // for details on its contents. 147 | // See the comment at the top of this file on how to load the `fs` module used by 148 | // this setting. 149 | // 150 | //https: { 151 | // key: fs.readFileSync('privatekey.pem'), 152 | // cert: fs.readFileSync('certificate.pem') 153 | //}, 154 | 155 | // The following property can be used to cause insecure HTTP connections to 156 | // be redirected to HTTPS. 157 | //requireHttps: true, 158 | 159 | // The following property can be used to disable the editor. The admin API 160 | // is not affected by this option. To disable both the editor and the admin 161 | // API, use either the httpRoot or httpAdminRoot properties 162 | //disableEditor: false, 163 | 164 | // The following property can be used to configure cross-origin resource sharing 165 | // in the HTTP nodes. 166 | // See https://github.com/troygoode/node-cors#configuration-options for 167 | // details on its contents. The following is a basic permissive set of options: 168 | //httpNodeCors: { 169 | // origin: "*", 170 | // methods: "GET,PUT,POST,DELETE" 171 | //}, 172 | 173 | // If you need to set an http proxy please set an environment variable 174 | // called http_proxy (or HTTP_PROXY) outside of Node-RED in the operating system. 175 | // For example - http_proxy=http://myproxy.com:8080 176 | // (Setting it here will have no effect) 177 | // You may also specify no_proxy (or NO_PROXY) to supply a comma separated 178 | // list of domains to not proxy, eg - no_proxy=.acme.co,.acme.co.uk 179 | 180 | // The following property can be used to add a custom middleware function 181 | // in front of all http in nodes. This allows custom authentication to be 182 | // applied to all http in nodes, or any other sort of common request processing. 
183 | //httpNodeMiddleware: function(req,res,next) { 184 | // // Handle/reject the request, or pass it on to the http in node by calling next(); 185 | // // Optionally skip our rawBodyParser by setting this to true; 186 | // //req.skipRawBodyParser = true; 187 | // next(); 188 | //}, 189 | 190 | // The following property can be used to pass custom options to the Express.js 191 | // server used by Node-RED. For a full list of available options, refer 192 | // to http://expressjs.com/en/api.html#app.settings.table 193 | //httpServerOptions: { }, 194 | 195 | // The following property can be used to verify websocket connection attempts. 196 | // This allows, for example, the HTTP request headers to be checked to ensure 197 | // they include valid authentication information. 198 | //webSocketNodeVerifyClient: function(info) { 199 | // // 'info' has three properties: 200 | // // - origin : the value in the Origin header 201 | // // - req : the HTTP request 202 | // // - secure : true if req.connection.authorized or req.connection.encrypted is set 203 | // // 204 | // // The function should return true if the connection should be accepted, false otherwise. 205 | // // 206 | // // Alternatively, if this function is defined to accept a second argument, callback, 207 | // // it can be used to verify the client asynchronously. 208 | // // The callback takes three arguments: 209 | // // - result : boolean, whether to accept the connection or not 210 | // // - code : if result is false, the HTTP error status to return 211 | // // - reason: if result is false, the HTTP reason string to return 212 | //}, 213 | 214 | // The following property can be used to seed Global Context with predefined 215 | // values. This allows extra node modules to be made available with the 216 | // Function node. 217 | // For example, 218 | // functionGlobalContext: { os:require('os') } 219 | // can be accessed in a function block as: 220 | // global.get("os") 221 | functionGlobalContext: { 222 | // os:require('os'), 223 | // jfive:require("johnny-five"), 224 | // j5board:require("johnny-five").Board({repl:false}) 225 | }, 226 | // `global.keys()` returns a list of all properties set in global context. 227 | // This allows them to be displayed in the Context Sidebar within the editor. 228 | // In some circumstances it is not desirable to expose them to the editor. The 229 | // following property can be used to hide any property set in `functionGlobalContext` 230 | // from being list by `global.keys()`. 231 | // By default, the property is set to false to avoid accidental exposure of 232 | // their values. Setting this to true will cause the keys to be listed. 233 | exportGlobalContextKeys: false, 234 | 235 | 236 | // Context Storage 237 | // The following property can be used to enable context storage. The configuration 238 | // provided here will enable file-based context that flushes to disk every 30 seconds. 239 | // Refer to the documentation for further options: https://nodered.org/docs/api/context/ 240 | // 241 | //contextStorage: { 242 | // default: { 243 | // module:"localfilesystem" 244 | // }, 245 | //}, 246 | 247 | // The following property can be used to order the categories in the editor 248 | // palette. If a node's category is not in the list, the category will get 249 | // added to the end of the palette. 
// If not set, the following default order is used:
//paletteCategories: ['subflows','flow','input','output','function','parser','social','mobile','storage','analysis','advanced'],

// Configure the logging output
logging: {
    // Only console logging is currently supported
    console: {
        // Level of logging to be recorded. Options are:
        // fatal - only those errors which make the application unusable should be recorded
        // error - record errors which are deemed fatal for a particular request + fatal errors
        // warn - record problems which are non fatal + errors + fatal errors
        // info - record information about the general running of the application + warn + error + fatal errors
        // debug - record information which is more verbose than info + info + warn + error + fatal errors
        // trace - record very detailed logging + debug + info + warn + error + fatal errors
        // off - turn off all logging (doesn't affect metrics or audit)
        level: "info",
        // Whether or not to include metric events in the log output
        metrics: false,
        // Whether or not to include audit events in the log output
        audit: false
    }
},

// Customising the editor
editorTheme: {
    projects: {
        // To enable the Projects feature, set this value to true
        enabled: false
    }
}
}

--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
# IoT Edge Offline Dashboarding

This project provides a set of modules that can be used with Azure IoT Edge to perform dashboarding at the edge.

The goal is to provide both guidance and a sample implementation to enable dashboards that run on the edge at sites in the field, while still sending data to the cloud for centralized reporting and monitoring.

If you want to jump right into the sample implementation, please start [here](./documentation/dashboarding-sample.md).

**Table of contents**

* [Engage and contribute](#engage-and-contribute)
* [Solution goals](#solution-goals)
* [Solution architecture & components](#solution-architecture--components)
* [Offline Dashboards sample](#offline-dashboards-sample)


## Engage and contribute

* Ask questions about developing for Azure IoT Edge on [Stack Overflow](https://stackoverflow.com/questions/tagged/azure-iot-edge) using the **azure-iot-edge** tag.

* Search for [known issues](https://github.com/AzureIoTGBB/iot-edge-offline-dashboarding/issues) or file a [new issue](https://github.com/AzureIoTGBB/iot-edge-offline-dashboarding/issues/new) if you find something broken in this project.

* Learn how you can contribute to this project [here](./documentation/contributing.md).

* This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

## Solution goals

The purpose of this solution is to provide both general-purpose guidance for dashboarding on the edge and a sample implementation.
While our sample implementation focuses on manufacturing, there are plenty of other potential use cases for this technology. Some examples include:

* Retail stores that may need local dashboards for inventory or asset management
* Warehouses that may need to manage the tracking and movement of product throughout the warehouse
* Smart buildings that may need to manage energy or HVAC efficiency throughout the property
* "Things that move" applications, such as container or cruise ships, that may need to operate for extended periods offline

What these scenarios have in common is the need not only to send important 'site' data to the cloud for centralized reporting and analytics, but also to continue local operations in the event of an internet outage.

The goal of this project is to demonstrate how this can be done for a specific manufacturing use case, while also giving an example that can be reused for other use cases by:

* Replacing the data source(s) with ones specific to the new use case
* Replacing the configuration files for the data ingestion and dashboards

## Solution architecture & components

The architecture for this solution utilizes four main components in addition to Azure IoT Hub.

* [Azure IoT Edge](https://docs.microsoft.com/en-us/azure/iot-edge/) is utilized to orchestrate and manage modules at the edge, in addition to providing capabilities for offline operation and message routing.
* [Node-RED](https://nodered.org/) is an open-source flow programming tool utilized to easily integrate and route messages from edge devices to InfluxDB.
* [InfluxDB](https://www.influxdata.com/) is an open-source, time series database for storing device telemetry.
* Lastly, [Grafana](https://grafana.com/) is an open-source analytics and dashboarding tool for visualizing device telemetry.

This architecture and its components are intended to be general purpose and apply across several industries and use cases by simply switching out the data sources and dashboards. However, manufacturing is by far the customer segment where this need comes up most often. Therefore, the sample implementation below focuses on that use case.

### Reasons for selecting this architecture

The main purpose of this solution is to give local operators the ability to view dashboards at the edge regardless of whether the edge device is online or offline. This is a scenario that IoT Edge naturally supports. To support dashboarding, however, both a storage component and a visualization component also had to be selected.

#### Storage component

Several storage solutions were reviewed, and the team selected InfluxDB for the following reasons:

* InfluxDB is a time series DB and as such is a natural fit for telemetry data from devices
* Open-source with a large community following
* Supported as a Grafana data source via an existing plugin
* Node-RED libraries for easy integration
* Quick time to value and can be deployed as a Docker container
* Ranked #1 among time series DBs according to [DB-Engines](https://db-engines.com/en/system/InfluxDB)

Although InfluxDB was chosen for storage, other DBs such as [Graphite](http://graphiteapp.org/), [Prometheus](https://prometheus.io) and [Elasticsearch](https://www.elastic.co/de/) were also considered and could potentially be used as well. [Azure Time Series Insights](https://azure.microsoft.com/en-us/services/time-series-insights) was considered too, but at the time of this activity it was not yet available on Azure IoT Edge.
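
To make the storage model concrete, the sketch below shows how a single telemetry message might map onto an InfluxDB point. This is an illustration only: the measurement, tag, and field names here are assumptions for the sake of the example, not the sample's actual schema (that schema is defined by the Node-RED flow in `modules/edgetoinfluxdb/flows.json`):

```json
{
  "measurement": "DeviceData",
  "tags": {
    "Site": "Site1",
    "Source": "opcsimulator"
  },
  "fields": {
    "STATUS": 101,
    "ITEM_COUNT_GOOD": 1563,
    "ITEM_COUNT_BAD": 12
  },
  "timestamp": "2020-06-01T12:00:00Z"
}
```

Tags are indexed metadata that dashboards can filter on cheaply (site, machine, source), fields hold the measured values themselves, and every point is keyed by time, which is exactly the shape of the device telemetry this solution records.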
#### Visualization component

Several visualization solutions were reviewed, and the team selected Grafana for the following reasons:

* Open-source with a large community following
* This particular use case centers on metric analysis rather than log analysis
* Flexibility, with support for a wide array of plugins for different DBs and other supporting tools
* Allows dashboards to be shared across an organization
* Quick time to value and can be deployed as a Docker container

Although Grafana was chosen to support visualization and dashboarding, other tools were considered and could potentially be used as well. For example, [Kibana](https://www.elastic.co/kibana) may be a better fit for visualizing and analyzing log files and is a natural choice when working with Elasticsearch. [Chronograf](https://www.influxdata.com/time-series-platform/chronograf) was considered but is limited to InfluxDB as a data source. [PowerBI Report Server](https://powerbi.microsoft.com/en-us/report-server/) was also investigated, but the lack of support for containerizing PowerBI Report Server meant it could not be used directly with Azure IoT Edge. Additionally, PowerBI Report Server does not support the real-time "live" dashboarding required for this solution.

#### Integration component

Node-RED was chosen as the tool to ease integration between IoT Edge and InfluxDB. Although the integration component could be written in several programming languages and containerized, Node-RED was selected for the following reasons:

* Open-source with a large community following
* Readily available [nodes](https://flows.nodered.org/node/node-red-contrib-azure-iot-edge-kpm) for tapping into IoT Edge message routes
* Readily available [nodes](https://flows.nodered.org/node/node-red-contrib-influxdb) for integrating and inserting data into InfluxDB as well as many other DBs
* Large library of nodes to integrate with other tools and platforms
* Easy flow-based programming that allows messages to be manipulated and massaged before they are inserted into a DB
* Can be deployed as a Docker container

## Offline Dashboards sample

The "Offline Dashboards" sample is built upon [Azure IoT Edge](https://azure.microsoft.com/en-us/services/iot-edge/) technology. Azure IoT Edge is responsible for deploying and managing the lifecycle of the set of modules (described below) that make up the Offline Dashboards sample.

![Diagram showing the offline dashboard architecture](./media/OfflineDashboards_diag.png)

Offline Dashboards runs on the IoT Edge device, continuously recording data that is sent from devices to IoT Hub. It contains three modules:

1. A Node-RED module that collects data from one or more data sources (in our case, off of the edgeHub message bus) and writes that data into InfluxDB.
2. An InfluxDB module that stores the data in a time series structure.
3. A Grafana module that serves the data from InfluxDB in dashboards.

![Diagram showing the Azure IoT Edge solution architecture](./media/OfflineDashboards_diag0.png)

The sample implementation leverages data from two OPC-UA servers.
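
To illustrate how the pieces are wired together, here is a rough sketch of the edgeHub routes a deployment manifest for this architecture might contain. The route names and the `input1` input name are illustrative assumptions; refer to `deployment.template.json` at the repository root for the actual configuration:

```json
{
  "$edgeHub": {
    "properties.desired": {
      "schemaVersion": "1.0",
      "routes": {
        "opcToNodeRed": "FROM /messages/modules/opcpublisher/* INTO BrokeredEndpoint(\"/modules/edgetoinfluxdb/inputs/input1\")",
        "opcToCloud": "FROM /messages/modules/opcpublisher/* INTO $upstream"
      },
      "storeAndForwardConfiguration": {
        "timeToLiveSecs": 7200
      }
    }
  }
}
```

The first route keeps telemetry flowing into the local dashboarding pipeline even while the device is offline; the second forwards the same messages to IoT Hub, with edgeHub's store-and-forward queue holding them during outages.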
For many reasons, [OPC-UA](https://opcfoundation.org/about/opc-technologies/opc-ua/) is Microsoft's recommended manufacturing integration technology wherever possible. However, the OPC-UA publisher that generates data for the dashboards could be substituted with other data sources, including Modbus, MQTT, or other custom protocols.

Start learning about the actual sample implementation [here](./documentation/dashboarding-sample.md).

--------------------------------------------------------------------------------