├── README.md
├── balena
│   ├── 1.2
│   │   └── docker-compose.yml
│   ├── 1.4
│   │   └── docker-compose.yml
│   ├── 1.5
│   │   └── docker-compose.yml
│   ├── 3.0
│   │   └── docker-compose.yml
│   ├── 3.4
│   │   └── docker-compose.yml
│   ├── 3.8
│   │   └── docker-compose.yml
│   └── README.md
├── docker-compose-2.0.yml
├── docker-compose-2.1-local.yml
├── docker-compose-2.1-sim.yml
├── docker-compose-3.x-local.yml
├── docker-compose-3.x-sim.yml
├── docker-compose.yml
├── examples
│   └── api
│       ├── bacnet
│       │   ├── v1
│       │   │   ├── create-multi-state-value.py
│       │   │   ├── device-scan.py
│       │   │   ├── object-scan.py
│       │   │   ├── read-property-multiple-post.py
│       │   │   ├── read-property-multiple.py
│       │   │   ├── update-multi-state-value.py
│       │   │   └── write-property.py
│       │   └── v2
│       │       ├── read-property-multiple.py
│       │       ├── read-property-schedule.py
│       │       ├── read-property.py
│       │       ├── read-range.py
│       │       ├── write-property-schedule.py
│       │       └── write-property.py
│       ├── command
│       │   └── v2
│       │       ├── get-commands.py
│       │       ├── read.py
│       │       ├── write-with-context.py
│       │       └── write.py
│       ├── helpers.py
│       ├── hpl
│       │   └── v1
│       │       ├── README.md
│       │       ├── download-csv.py
│       │       ├── get-points-device-id-fieldmask.py
│       │       ├── get-points-device-id.py
│       │       └── get-points.py
│       └── platform
│           └── v1
│               └── branding.py
├── ha
│   ├── 00-deploy-system-dependencies.yml
│   ├── 01-deploy-glusterfs.yml
│   ├── 02-deploy-normal.yml
│   ├── README.md
│   ├── files
│   │   ├── 01-start-nf.sh
│   │   ├── 01-stop-nf.sh
│   │   ├── docker-compose-2.1-redis-ha.yaml.j2
│   │   ├── reconfigure.sh
│   │   └── sentinel.conf.j2
│   └── hosts.yml
├── iotedge
│   └── manifest-1.4.json
├── logo_nf.png
├── owl
│   └── model.ttl
├── solutions
│   └── idl
│       ├── assets
│       │   ├── favicon.ico
│       │   └── logo.svg
│       ├── dashboards
│       │   └── normal
│       │       ├── equipment-timeseries.json
│       │       ├── home.json
│       │       └── timeseries.json
│       ├── docker-compose.yml
│       ├── grafana.ini
│       ├── mosquitto.conf
│       ├── provisioning
│       │   ├── dashboards
│       │   │   └── normal-dashboard.yaml
│       │   └── datasources
│       │       └── tailscale-datasource.yaml
│       └── readme.md
├── sparkplug-historian
│   └── README
├── sparkplug-sql
│   ├── tables.sql
│   └── views.sql
├── tools
│   ├── modpoll
│   ├── nfcli.py
│   └── restore.py
└── view
    ├── listener.json
    └── podspec.json
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | Welcome to the NF SDK. This repository contains a `docker-compose`
4 | file which you can use to run a local copy of NF; and examples of
5 | using the REST API.
6 |
7 | For more information, see our main webpage and developer documentation.
8 |
9 | [Normal Framework](https://www.normal.dev) | [🔗 Portal](https://portal.normal-online.net) | [🔗 Developer Docs](https://docs2.normal.dev)
10 |
11 | Installation Instructions: Ubuntu
12 | -------------------------
13 |
14 | First, install Docker and docker-compose, then
15 | log into the Azure ACR repository using the credentials you obtained from Normal. These are available through the portal.
16 |
17 | ```
18 | $ sudo apt install docker docker-compose git
19 | $ sudo docker login -u <username> -p <password> normal.azurecr.io
20 | ```
21 |
22 | After that, clone this repository:
23 |
24 | ```
25 | $ git clone https://github.com/normalframework/nf-sdk.git
26 | $ cd nf-sdk
27 | ```
28 |
29 | Finally, pull the containers and start NF:
30 | ```
31 | $ sudo docker-compose pull
32 | $ sudo docker-compose up -d
33 | ```
34 |
35 | After this step is finished, you should be able to visit the
36 | management console at [http://localhost:8080](http://localhost:8080)
37 | on that machine.
38 |
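39 | To verify the API is responding (assuming the default port mapping), you can
40 | list the currently running commands; a fresh install should return an empty
41 | result (the exact output may vary by version):
42 |
43 | ```
44 | $ curl http://localhost:8080/api/v2/command
45 | ```
46 |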
39 | Do More
40 | =======
41 |
42 | Normal offers several pre-built integrations with other systems under permissive licenses. These can be quickly installed using our Application SDK.
43 |
44 | | Integration | Description | Read Data | Write Data | System Model | UX |
45 | | ----------- | ----------- | ----------- | ------------ | - | - |
46 | | [Application Template](https://github.com/normalframework/applications-template) | Starting point for new apps. Includes example hooks for testing point writeability and Postgres import | ✔️ | | | |
47 | | [Desigo CC](https://github.com/normalframework/app-desigocc) | Retrieve data from a Desigo CC NORIS API | ✔️ | | | |
48 | | [Archilogic](https://github.com/normalframework/app-archilogic) | Display data on a floor plan | | | | ✔️ |
49 | | [Guideline 36](https://github.com/normalframework/gl36-demo/tree/master) | Implement certain [Guideline 36](https://www.ashrae.org/news/ashraejournal/guideline-36-2021-what-s-new-and-why-it-s-important) sequences | | ✔️ | | ✔️ |
50 | | [Avuity](https://github.com/normalframework/avuity-integration) | Expose data from [Avuity](https://www.avuity.com) occupancy sensors as BACnet objects | ✔️ | | ✔️ | |
51 | | [ALC](https://github.com/normalframework/alc-plugin) | Import data from WebCTRL | | | ✔️ | |
52 | | [OPC](https://github.com/normalframework/opc-integration) | Connect to OPC-UA servers | ✔️ | | | |
53 |
54 | Release Types
55 | =============
56 |
57 | As of version 3.8, two release types are available:
58 |
59 | * GA (general availability) releases are hosted in the `normal.azurecr.io` registry. These releases are "locked" and require a valid license to be entered before they can be used.
60 | * Enterprise releases are hosted in the legacy `normalframework.azurecr.io` registry. To access these, you must have a master service agreement. These releases do not require activation to be used; see the login example below.
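61 |
62 | For example, to use an enterprise release, log in to the legacy registry and
63 | reference its images in your compose file (the tag shown is illustrative):
64 |
65 | ```
66 | $ sudo docker login -u <username> -p <password> normalframework.azurecr.io
67 | $ sudo docker pull normalframework.azurecr.io/normalframework/nf-full:3.8
68 | ```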
--------------------------------------------------------------------------------
/balena/1.2/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 |
3 | volumes:
4 | redis-db:
5 | jupyter-notebooks:
6 | spool-data:
7 | spool-errors:
8 | spool-commands:
9 | service-config:
10 | tailscale:
11 | tailscale-certs:
12 | templates:
13 |
14 | services:
15 | redis:
16 | image: normalframework/redis:1.2.2
17 | ports:
18 | - "6379:6379"
19 | volumes:
20 | - "redis-db:/data"
21 | networks:
22 | - internal
23 |
24 | envoy:
25 | image: normalframework/envoy:1.2.2
26 | networks:
27 | - internal
28 | ports:
29 | - "80:8080"
30 | - "443:4443"
31 | environment:
32 | - BACNET_SERVICE_ADDRESS=10.114.104.1:9090
33 | volumes:
34 | - "tailscale-certs:/tailscale-certs"
35 |
36 | tailscale:
37 | image: normalframework/tailscale:1.2.2
38 | network_mode: host
39 | command: tailscaled
40 | devices:
41 | - /dev/net/tun:/dev/net/tun
42 | volumes:
43 | - "tailscale:/var/lib"
44 | - "tailscale-certs:/tailscale-certs"
45 | cap_add:
46 | - NET_ADMIN
47 | - NET_RAW
48 |
49 | admin-static:
50 | image: normalframework/nf-console:1.2.2
51 | networks:
52 | - internal
53 | depends_on:
54 | - redis
55 | environment:
56 | - BACNET_SERVICE_ADDRESS=10.114.104.1:9090
57 |
58 | bacnet-service:
59 | image: normalframework/nf:1.2.2
60 | command: nf bacnet-hpl /etc/bacnet/config.pbjson
61 | network_mode: host
62 | volumes:
63 | - "service-config:/etc/bacnet"
64 | depends_on:
65 | - redis
66 | environment:
67 | - REDIS_ADDRESS=redis:6379
68 | - SERVICE_BIND_ADDRESS=10.114.104.1:9090
69 | - BACNET_SERVICE_ADDRESS=10.114.104.1:9090
70 | - SCAN_SERVICE_ADDRESS=scan-service:9090
71 | - POLL_SERVICE_ADDRESS=poll-service:9090
72 | - POINT_SERVICE_ADDRESS=point-service:9090
73 |
74 | bacnet-status-service:
75 | image: normalframework/nf:1.2.2
76 | command: nf hpl-status
77 | networks:
78 | - internal
79 | depends_on:
80 | - redis
81 | - point-service
82 | environment:
83 | - SERVICE_BIND_ADDRESS=:9090
84 | - REDIS_ADDRESS=redis:6379
85 | - BACNET_SERVICE_ADDRESS=10.114.104.1:9090
86 | - SCAN_SERVICE_ADDRESS=scan-service:9090
87 | - POLL_SERVICE_ADDRESS=poll-service:9090
88 | - POINT_SERVICE_ADDRESS=point-service:9090
89 |
90 | scan-service:
91 | image: normalframework/nf:1.2.2
92 | command: nf bacnet-scan
93 | networks:
94 | - internal
95 | depends_on:
96 | - redis
97 | - point-service
98 | environment:
99 | - SERVICE_BIND_ADDRESS=:9090
100 | - REDIS_ADDRESS=redis:6379
101 | - BACNET_SERVICE_ADDRESS=10.114.104.1:9090
102 | - SCAN_SERVICE_ADDRESS=scan-service:9090
103 | - POLL_SERVICE_ADDRESS=poll-service:9090
104 | - POINT_SERVICE_ADDRESS=point-service:9090
105 |
106 | poll-service:
107 | image: normalframework/nf:1.2.2
108 | command: nf bacnet-poll
109 | networks:
110 | - internal
111 | depends_on:
112 | - redis
113 | - point-service
114 | environment:
115 | - REDIS_ADDRESS=redis:6379
116 | - BACNET_SERVICE_ADDRESS=10.114.104.1:9090
117 | - SCAN_SERVICE_ADDRESS=scan-service:9090
118 | - POLL_SERVICE_ADDRESS=poll-service:9090
119 | - POINT_SERVICE_ADDRESS=point-service:9090
120 |
121 | point-service:
122 | image: normalframework/nf:1.2.2
123 | command: nf point
124 | networks:
125 | - internal
126 | depends_on:
127 | - redis
128 | ports:
129 | - "9093:9093"
130 | environment:
131 | - SERVICE_BIND_ADDRESS=:9090
132 | - REDIS_ADDRESS=redis:6379
133 | - BACNET_SERVICE_ADDRESS=10.114.104.1:9090
134 | - SCAN_SERVICE_ADDRESS=scan-service:9090
135 | - POLL_SERVICE_ADDRESS=poll-service:9090
136 | - POINT_SERVICE_ADDRESS=point-service:9090
137 | - DATA_LOG_DIR=/data
138 | - ERROR_LOG_DIR=/errors
139 | volumes:
140 | - "spool-data:/data"
141 | - "spool-errors:/errors"
142 |
143 |
144 | sparkplug-service:
145 | image: normalframework/nf:1.2.2
146 | command: nf sparkplug
147 | networks:
148 | - internal
149 | depends_on:
150 | - point-service
151 | volumes:
152 | - "service-config:/versions"
153 | environment:
154 | - VERSION_STORE_DIR=/versions
155 | - POINT_SERVICE_ADDRESS=point-service:9090
156 |
157 | sparkplug-service-2:
158 | image: normalframework/nf:1.2.2
159 | command: nf sparkplug
160 | networks:
161 | - internal
162 | depends_on:
163 | - point-service
164 | volumes:
165 | - "service-config:/versions"
166 | environment:
167 | - VERSION_STORE_DIR=/versions
168 | - POINT_SERVICE_ADDRESS=point-service:9090
169 |
170 | platform-service:
171 | image: normalframework/nf:1.2.2
172 | command: nf platform
173 | networks:
174 | - internal
175 |
176 | command-service:
177 | image: normalframework/nf:1.2.2
178 | command: nf command
179 | networks:
180 | - internal
181 | depends_on:
182 | - point-service
183 | - bacnet-service
184 | - redis
185 | volumes:
186 | - "spool-commands:/commands"
187 | environment:
188 | - SERVICE_BIND_ADDRESS=:9090
189 | - REDIS_ADDRESS=redis:6379
190 | - POINT_SERVICE_ADDRESS=point-service:9090
191 | - BACNET_SERVICE_ADDRESS=10.114.104.1:9090
192 |       - COMMAND_LOG_DIR=/commands
193 |
194 | template-service:
195 | image: normalframework/nf:1.2.2
196 | command: nf template
197 | networks:
198 | - internal
199 | depends_on:
200 | - point-service
201 | - redis
202 | volumes:
203 | - "templates:/templates"
204 | environment:
205 | - SERVICE_BIND_ADDRESS=:9090
206 | - REDIS_ADDRESS=redis:6379
207 | - POINT_SERVICE_ADDRESS=point-service:9090
208 | - TEMPLATE_DIR=/templates
209 |
210 | networks:
211 | # the services all talk on this network which is only exposed through envoy
212 | internal:
213 |
214 | # the BACnet router routes between this bridge on the host and the
215 | # "real" physical interface.
216 |
217 |
--------------------------------------------------------------------------------
/balena/1.4/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 |
3 | volumes:
4 | redis-db:
5 | jupyter-notebooks:
6 | spool-data:
7 | spool-errors:
8 | spool-commands:
9 | service-config:
10 | tailscale:
11 | tailscale-certs:
12 | templates:
13 |
14 | services:
15 | redis:
16 | image: normalframework.azurecr.io/normalframework/redis:1.4
17 | ports:
18 | - "6379:6379"
19 | volumes:
20 | - "redis-db:/data"
21 |
22 | nf:
23 | image: normalframework.azurecr.io/normalframework/nf-full:1.4
24 | network_mode: host
25 | environment:
26 | - PORT=80
27 | depends_on:
28 | - redis
29 | volumes:
30 | - "tailscale-certs:/tailscale-certs"
31 | - "service-config:/var/nf/bacnet"
32 | - "spool-data:/var/nf/data"
33 | - "spool-errors:/var/nf/errors"
34 | - "service-config:/var/nf/sparklug"
35 | - "spool-commands:/var/nf/commands"
36 | - "templates:/var/nf/templates"
37 |
38 | tailscale:
39 | image: normalframework.azurecr.io/normalframework/tailscale:1.4
40 | network_mode: host
41 | command: tailscaled
42 | devices:
43 | - /dev/net/tun:/dev/net/tun
44 | volumes:
45 | - "tailscale:/var/lib"
46 | - "tailscale-certs:/tailscale-certs"
47 | cap_add:
48 | - NET_ADMIN
49 | - NET_RAW
50 |
56 |
57 |
--------------------------------------------------------------------------------
/balena/1.5/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 |
3 | volumes:
4 | redis-db:
5 | jupyter-notebooks:
6 | spool-data:
7 | spool-errors:
8 | spool-commands:
9 | service-config:
10 | tailscale:
11 | tailscale-certs:
12 | templates:
13 |
14 | services:
15 | redis:
16 | image: normalframework.azurecr.io/normalframework/redis:1.5
17 | ports:
18 | - "6379:6379"
19 | volumes:
20 | - "redis-db:/data"
21 |
22 | nf:
23 | image: normalframework.azurecr.io/normalframework/nf-full:1.5
24 | network_mode: host
25 | cap_add:
26 | - NET_BIND_SERVICE
27 | environment:
28 | - PORT=80
29 | depends_on:
30 | - redis
31 | volumes:
32 | - "tailscale-certs:/tailscale-certs"
33 | - "service-config:/var/nf/bacnet"
34 | - "spool-data:/var/nf/data"
35 | - "spool-errors:/var/nf/errors"
36 | - "service-config:/var/nf/sparklug"
37 | - "service-config:/var/nf/modbus"
38 | - "spool-commands:/var/nf/commands"
39 | - "templates:/var/nf/templates"
40 |
41 | tailscale:
42 | image: normalframework.azurecr.io/normalframework/tailscale:1.5
43 | network_mode: host
44 | command: tailscaled
45 | devices:
46 | - /dev/net/tun:/dev/net/tun
47 | volumes:
48 | - "tailscale:/var/lib"
49 | - "tailscale-certs:/tailscale-certs"
50 | cap_add:
51 | - NET_ADMIN
52 | - NET_RAW
53 |
59 |
60 |
--------------------------------------------------------------------------------
/balena/3.0/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 |
3 | volumes:
4 | redis-db:
5 | nf-var:
6 |
7 | services:
8 | redis:
9 | image: normalframework.azurecr.io/normalframework/redis:3.1.0-0
10 | ports:
11 | - "6379:6379"
12 | volumes:
13 | - "redis-db:/data"
14 |
15 | nf:
16 | image: normalframework.azurecr.io/normalframework/nf-full:3.1.0-0
17 | network_mode: host
18 | privileged: true
19 | environment:
20 | - PORT=80
21 | depends_on:
22 | - redis
23 | volumes:
24 | - "nf-var:/var/nf"
25 |
26 |
--------------------------------------------------------------------------------
/balena/3.4/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2.1"
2 |
3 | volumes:
4 | redis-db:
5 | nf-var:
6 | openvpn:
7 |
8 | services:
9 | redis:
10 | image: normalframework.azurecr.io/normalframework/redis:3.4.0-7
11 | ports:
12 | - "6379:6379"
13 | volumes:
14 | - "redis-db:/data"
15 |
16 | nf:
17 | image: normalframework.azurecr.io/normalframework/nf-full:3.4.0-7
18 | network_mode: host
19 | privileged: true
20 | environment:
21 | - PORT=80
22 | depends_on:
23 | - redis
24 | volumes:
25 | - "nf-var:/var/nf"
26 |
27 |
--------------------------------------------------------------------------------
/balena/3.8/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2.1"
2 |
3 | volumes:
4 | redis-db:
5 | nf-var:
6 |
7 | services:
8 | redis:
9 | image: normalframework.azurecr.io/normalframework/redis:3.8.2-0
10 | ports:
11 | - "6379:6379"
12 | volumes:
13 | - "redis-db:/data"
14 |
15 | nf:
16 | image: normalframework.azurecr.io/normalframework/nf-full:3.8.2-0
17 | network_mode: host
18 | privileged: true
19 | environment:
20 | - PORT=80
21 | depends_on:
22 | - redis
23 | volumes:
24 | - "nf-var:/var/nf"
25 |
26 |
--------------------------------------------------------------------------------
/balena/README.md:
--------------------------------------------------------------------------------
1 | Docker Compose files for Balena
2 | -------------------------------
3 |
4 | This directory has baseline docker-compose files for when NF is deployed
5 | using Balena. There are some small differences between Balena's platform
6 | and normal docker-compose which make it desirable to have separate
7 | configuration files.
--------------------------------------------------------------------------------
/docker-compose-2.0.yml:
--------------------------------------------------------------------------------
1 | # ___ ___
2 | # /__/\ / /\
3 | # \ \:\ / /:/_
4 | # \ \:\ / /:/ /\
5 | # _____\__\:\ / /:/ /:/
6 | # /__/::::::::\ /__/:/ /:/
7 | # \ \:\~~\~~\/ \ \:\/:/
8 | # \ \:\ ~~~ \ \::/
9 | # \ \:\ \ \:\
10 | # \ \:\ \ \:\
11 | # \__\/ \__\/
12 | #
13 | #
14 |
15 | # Welcome to the NF example docker-compose. This file should stand up
16 | # a mostly functioning NF instance on your desktop. Before running
17 | # you should just check a few things:
18 | # 1. several containers need a writable volume, which this file puts in /tmp by default
19 | # 2. if the BACnet service fails to start, you may need to specify
20 | # which interface to use in its environment.
21 | #
22 | # This compose file runs entirely in a docker internal network, along
23 | # with a simple BACnet simulator.
24 |
25 | version: "2.2"
26 | services:
27 |
28 | # the main container which runs all the NF services, load balancer,
29 | # and management console.
30 | nf:
31 | image: normalframework.azurecr.io/normalframework/nf-full:develop
32 | ports:
33 | - "8080:8080"
34 | networks:
35 | - internal
36 | depends_on:
37 | - redis
38 | tmpfs:
39 | - /run:exec
40 | - /etc/nginx/conf.d/
41 | - /tmp
44 | environment:
45 | - REDIS_ADDRESS=redis:6379
46 |
47 |     # if you want data to be persistent, you need to mount /var in the
48 |     # container on a volume
49 | volumes:
50 | - /tmp/nf:/var
51 |
52 | # Most persistent data is in redis. This is essentially an
53 | # unmodified upstream with RediSearch and RedisTimeseries installed.
54 | redis:
55 | image: normalframework.azurecr.io/normalframework/redis:develop
56 | networks:
57 | - internal
58 | volumes:
59 | - /tmp/nf-redis:/data
60 |
61 | bacnet-simulator:
62 | image: stevedh/bactools
63 | command: server
64 |
65 | networks:
66 | - internal
67 | environment:
68 | - BACNET_IP_DEBUG=1
69 |
70 | modbus-simulator:
71 | image: normalframework.azurecr.io/normalframework/modbus-sim:develop
72 | networks:
73 | - internal
74 |
75 | networks:
76 | internal:
77 |
78 |
--------------------------------------------------------------------------------
/docker-compose-2.1-local.yml:
--------------------------------------------------------------------------------
1 | # ___ ___
2 | # /__/\ / /\
3 | # \ \:\ / /:/_
4 | # \ \:\ / /:/ /\
5 | # _____\__\:\ / /:/ /:/
6 | # /__/::::::::\ /__/:/ /:/
7 | # \ \:\~~\~~\/ \ \:\/:/
8 | # \ \:\ ~~~ \ \::/
9 | # \ \:\ \ \:\
10 | # \ \:\ \ \:\
11 | # \__\/ \__\/
12 | #
13 | #
14 |
15 | # Welcome to the NF example docker-compose. This file should stand up
16 | # a functioning NF instance on your desktop. Before running
17 | # you should just check a few things:
18 | # 1. several containers need a writable volume, which this file puts in /tmp by default
19 | # 2. if the BACnet service fails to start, you may need to specify
20 | # which interface to use in its environment.
21 | #
22 | # This compose file runs attached to the local network in host network
23 | # mode. This is required for BACnet/IP broadcast traffic to work correctly
24 | # on the local network
25 |
26 |
27 | version: "2.2"
28 | services:
29 |
30 | # the main container which runs all the NF services, load balancer,
31 | # and management console.
32 | nf:
33 | image: normalframework.azurecr.io/normalframework/nf-full:2.1.4-2
34 | network_mode: host
35 | depends_on:
36 | - redis
37 | tmpfs:
38 | - /run:exec
39 | - /etc/nginx/conf.d/
40 | - /tmp
41 |
42 |     # if you want data to be persistent, you need to mount /var in the
43 |     # container on a volume
44 | # volumes:
45 | # - /tmp/nf:/var
46 |
47 | # Most persistent data is in redis. This is essentially an
48 | # unmodified upstream with RediSearch and RedisTimeseries installed.
49 | redis:
50 | ports:
51 | - "6379:6379"
52 | image: normalframework.azurecr.io/normalframework/redis:2.1.4-2
53 | volumes:
54 |       - /tmp/nf-redis:/data
55 |
56 |
57 |
--------------------------------------------------------------------------------
/docker-compose-2.1-sim.yml:
--------------------------------------------------------------------------------
1 | # ___ ___
2 | # /__/\ / /\
3 | # \ \:\ / /:/_
4 | # \ \:\ / /:/ /\
5 | # _____\__\:\ / /:/ /:/
6 | # /__/::::::::\ /__/:/ /:/
7 | # \ \:\~~\~~\/ \ \:\/:/
8 | # \ \:\ ~~~ \ \::/
9 | # \ \:\ \ \:\
10 | # \ \:\ \ \:\
11 | # \__\/ \__\/
12 | #
13 | #
14 |
15 | # Welcome to the NF example docker-compose. This file should stand up
16 | # a mostly functioning NF instance on your desktop. Before running
17 | # you should just check a few things:
18 | # 1. several containers need a writable volume, which this file puts in /tmp by default
19 | # 2. if the BACnet service fails to start, you may need to specify
20 | # which interface to use in its environment.
21 | #
22 | # This compose file runs entirely in a docker internal network, along
23 | # with a simple BACnet simulator.
24 |
25 | version: "2.2"
26 | services:
27 |
28 | # the main container which runs all the NF services, load balancer,
29 | # and management console.
30 | nf:
31 | image: normalframework.azurecr.io/normalframework/nf-full:2.1
32 | networks:
33 | - internal
34 | depends_on:
35 | - redis
36 | tmpfs:
37 | - /run:exec
38 | - /etc/nginx/conf.d/
39 | - /tmp
40 | ports:
41 | - "8080:8080"
42 | environment:
43 | - REDIS_ADDRESS=redis:6379
44 |
45 |     # if you want data to be persistent, you need to mount /var in the
46 |     # container on a volume
47 | # volumes:
48 | # - /tmp/nf:/var
49 |
50 | # Most persistent data is in redis. This is essentially an
51 | # unmodified upstream with RediSearch and RedisTimeseries installed.
52 | redis:
53 | image: normalframework.azurecr.io/normalframework/redis:2.1
54 | networks:
55 | - internal
56 | # volumes:
57 | # - /tmp/nf-redis:/data
58 |
59 | bacnet-simulator:
60 | image: stevedh/bactools
61 | command: server
62 |
63 | networks:
64 | - internal
65 | environment:
66 | - BACNET_IP_DEBUG=1
67 |
68 | modbus-simulator:
69 | image: normalframework.azurecr.io/normalframework/modbus-sim:2.1
70 | networks:
71 | - internal
72 |
73 | networks:
74 | internal:
75 |
76 |
--------------------------------------------------------------------------------
/docker-compose-3.x-local.yml:
--------------------------------------------------------------------------------
1 | # ___ ___
2 | # /__/\ / /\
3 | # \ \:\ / /:/_
4 | # \ \:\ / /:/ /\
5 | # _____\__\:\ / /:/ /:/
6 | # /__/::::::::\ /__/:/ /:/
7 | # \ \:\~~\~~\/ \ \:\/:/
8 | # \ \:\ ~~~ \ \::/
9 | # \ \:\ \ \:\
10 | # \ \:\ \ \:\
11 | # \__\/ \__\/
12 | #
13 | #
14 |
15 | # Welcome to the NF example docker-compose. This file should stand up
16 | # a functioning NF instance on your desktop. Before running
17 | # you should just check a few things:
18 | #  1. several containers need a writable volume, which this file puts under /var by default
19 | # 2. if the BACnet service fails to start, you may need to specify
20 | # which interface to use in its environment.
21 | #
22 | # This compose file runs attached to the local network in host network
23 | # mode. This is required for BACnet/IP broadcast traffic to work correctly
24 | # on the local network
25 |
26 |
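27 | # Typical usage (the repository's docker-compose.yml points at this file):
28 | #   sudo docker-compose -f docker-compose-3.x-local.yml up -d
29 |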
27 | version: "2.2"
28 | services:
29 |
30 | # the main container which runs all the NF services, load balancer,
31 | # and management console.
32 | nf:
33 | image: normal.azurecr.io/normalframework/nf-full:3.9
34 |
35 | # use this if you have access to an enterprise license
36 | # image: normalframework.azurecr.io/normalframework/nf-full:3.8
37 |
38 | # a debug version of each release is available for enterprise
39 | # customers, which has additional packages for debugging installed
40 | # (like nmap, tcpdump, and python), and also runs an ssh server on
41 | # port 22 with a default username/password of root/password.
42 | # don't use in production!
43 | # image: normalframework.azurecr.io/normalframework/nf-debug:3.8
44 | network_mode: host
45 | depends_on:
46 | - redis
47 |
48 | ##
49 | ## Other configuration options
50 | ## Depending on your environment, you may need to enable and adjust some of these options.
51 | ## For production use, most of these should be configured and set appropriately.
52 |
53 | ## set a persistent volume for NF data. required or else state will disappear after a restart
54 | volumes:
55 | - /var/nf:/var/nf
56 | - /etc/timezone:/etc/timezone:ro
57 | - /etc/localtime:/etc/localtime:ro
58 |
59 |
60 | ## have docker always restart the container if it crashes
61 | restart: unless-stopped
62 |
63 | ## Set memory and CPU limits so Linux kills the container instead of locking up
64 | # mem_limit: 1024m
65 | # cpus: 2.0
66 |
67 |     ## If these options are uncommented, NF will install iptables
68 |     ## firewall rules to disallow access to redis on the specified
69 |     ## interface.
70 | #
71 | # cap_add:
72 | # - NET_ADMIN
73 | # environment:
74 | # - NORMAL_FIREWALL_INTERFACE=enp1s0
75 |
76 | ## Enable these options to mount the container read-only. This is
77 | ## probably more secure but prevents you from making changes
78 | ## within the container (other than what is on a volume mount)
79 | # read_only: true
80 | # tmpfs:
81 | # - /run:exec
82 | # - /etc/nginx/conf.d/
83 | # - /tmp
84 | # environment:
85 | # - S6_READ_ONLY_ROOT=1
86 |
87 | ## If Normal will be placed behind a reverse proxy such as a WAF,
88 | ## set the path it will be accessed at here so that links will be generated correctly.
89 | # environment:
90 | # - APPLICATION_PATH=/http-8080/localhost
91 |
92 |
93 | # Most persistent data is in redis. This is essentially an
94 | # unmodified upstream with RediSearch and RedisTimeseries installed.
95 | redis:
96 | ports:
97 | - "6379:6379"
98 | image: normal.azurecr.io/normalframework/redis:3.9
99 | # image: normalframework.azurecr.io/normalframework/redis:3.8
100 |
101 | ## set this or else the point database will not persist
102 | volumes:
103 | - /var/nf-redis:/data
104 |
105 | restart: unless-stopped
106 |
107 | ## Set memory and CPU limits so Linux kills the container instead of locking up
108 | # mem_limit: 1024m
109 | # cpus: 2.0
110 |
--------------------------------------------------------------------------------
/docker-compose-3.x-sim.yml:
--------------------------------------------------------------------------------
1 | # ___ ___
2 | # /__/\ / /\
3 | # \ \:\ / /:/_
4 | # \ \:\ / /:/ /\
5 | # _____\__\:\ / /:/ /:/
6 | # /__/::::::::\ /__/:/ /:/
7 | # \ \:\~~\~~\/ \ \:\/:/
8 | # \ \:\ ~~~ \ \::/
9 | # \ \:\ \ \:\
10 | # \ \:\ \ \:\
11 | # \__\/ \__\/
12 | #
13 | #
14 |
15 | # Welcome to the NF example docker-compose. This file should stand up
16 | # a functioning NF instance on your desktop. Before running
17 | # you should just check a few things:
18 | # 1. several containers need a writable volume, which this file puts in /tmp by default
19 | # 2. if the BACnet service fails to start, you may need to specify
20 | # which interface to use in its environment.
21 | #
22 | # This compose file runs entirely in a docker internal network, along
23 | # with a simple BACnet simulator.
25 |
26 |
27 | version: "2.2"
28 | services:
29 |
30 | # the main container which runs all the NF services, load balancer,
31 | # and management console.
32 | nf:
33 | image: normalframework.azurecr.io/normalframework/nf-full:3.8
34 | ports:
35 | - 8080:8080
36 | depends_on:
37 | - redis
38 | tmpfs:
39 | - /run:exec
40 | - /etc/nginx/conf.d/
41 | - /tmp
42 | environment:
43 | - REDIS_ADDRESS=redis:6379
44 | # set it behind a reverse proxy
45 | # - APPLICATION_PATH=/http-8080/localhost
46 |
47 | # set to enable username/password on the admin console + API
48 | # - CONSOLE_USERNAME=admin
49 | # - CONSOLE_PASSWORD=pw
50 | networks:
51 | - internal
52 |
53 | # if you want data to be persistent, you need to mount /var in the container
54 | # a volume
55 | # volumes:
56 | # - /tmp/nf:/var
57 |
58 | # Most persistent data is in redis. This is essentially an
59 | # unmodified upstream with RediSearch and RedisTimeseries installed.
60 | redis:
61 | image: normalframework.azurecr.io/normalframework/redis:3.8
62 | volumes:
63 | - /tmp/nf-redis:/data
64 | networks:
65 | - internal
66 |
67 |
68 | bacnet-simulator:
69 | image: stevedh/bactools
70 | command: server
71 |
72 | networks:
73 | - internal
74 | environment:
75 | - BACNET_IP_DEBUG=1
76 |
77 | networks:
78 | internal:
79 |
80 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | docker-compose-3.x-local.yml
--------------------------------------------------------------------------------
/examples/api/bacnet/v1/create-multi-state-value.py:
--------------------------------------------------------------------------------
1 | """Create A MultiState Value Object
2 |
3 | This example shows how to also create the State_Text property which
4 | defines the values of the enumeration.
5 | """
6 | import sys
7 | sys.path.append("../..")
8 |
9 | from helpers import NfClient, print_response
10 |
11 | client = NfClient()
12 |
13 | res = client.post("/api/v1/bacnet/local", {
14 | "object_id": {
15 | "object_type": "OBJECT_MULTI_STATE_VALUE",
16 | "instance": 0, # create a new object
17 | },
18 | "props": [
19 | {
20 | "property": "PROP_OBJECT_NAME",
21 | "value": {
22 | "character_string": "Example MSV",
23 | },
24 | },
25 | {
26 | "property": "PROP_UNITS",
27 | "value": {
28 | "enumeration": "85", # look up enum value of your units
29 | },
30 | },
31 | {
32 | "property": "PROP_OUT_OF_SERVICE",
33 | "value": {
34 | "boolean": False,
35 | },
36 | },
37 | {
38 | "property": "PROP_PRESENT_VALUE",
39 | "value": {
40 | "enumerated": 2,
41 | },
42 | },
43 | {
44 | "property": "PROP_NUMBER_OF_STATES",
45 | "value": {
46 | "unsigned": 3,
47 | },
48 | },
49 | {
50 | "property": "PROP_STATE_TEXT",
51 | "value": {
52 | "array": [
53 | {
54 | "character_string": "State 1",
55 | },
56 | {
57 | "character_string": "State 2",
58 | },
59 | {
60 | "character_string": "State 3",
61 | },
62 | ]
63 | },
64 | }
65 | ]
66 | })
67 | print_response(res)
68 |
69 |
70 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v1/device-scan.py:
--------------------------------------------------------------------------------
1 | """Device discovery
2 |
3 | This example will initiate a device scan which does not scan any found
4 | devices. It will wait for the scan to complete, and print any
5 | discovered devices.
6 | """
7 | import time
8 | import sys
9 | sys.path.append("../..")
10 |
11 | from helpers import NfClient, print_response
12 |
13 | client = NfClient()
14 |
15 | res = client.post("/api/v1/bacnet/scan", json={
16 | "autoImport": False, # set to true to update object database
17 | "device": {
18 | "autoScan": True, # force a new scan of the device even if we've previously scanned it
19 | "targets": [
20 | {
21 | # set to 0 for global scan
22 | "lowLimit": 260001,
23 | "highLimit": 260001,
24 | },
25 | ],
26 | },
27 | })
28 | print_response(res)
29 | scan_id = res.json()["id"]
30 |
31 | print("waiting for device scan to complete...")
32 | def wait_on_job(scan_id):
33 | for _ in range(0, 100):
34 | time.sleep(.1)
35 | res = client.get("/api/v1/bacnet/scan", params={
36 | "idFilter": scan_id,
37 | "full": True,
38 | })
39 | scan = res.json()
40 | if scan["results"][0]["status"] not in ["PENDING", "RUNNING"]:
41 | print_response(res)
42 | break
43 |
44 | wait_on_job(scan_id)
45 |
46 | # now query on parentIdFilter=scan_id to get the object lists from any discovered devices
47 |
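48 | # For example (illustrative; the same pattern as the idFilter query above):
49 | # res = client.get("/api/v1/bacnet/scan", params={
50 | #     "parentIdFilter": scan_id,
51 | #     "full": True,
52 | # })
53 | # print_response(res)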
--------------------------------------------------------------------------------
/examples/api/bacnet/v1/object-scan.py:
--------------------------------------------------------------------------------
1 | """Device discovery
2 |
3 | This example will initiate a device scan which does not scan any found
4 | devices. It will wait for the scan to complete, and print any
5 | discovered devices.
6 | """
7 | import time
8 | import sys
9 | import base64
10 | sys.path.append("../..")
11 |
12 | from helpers import NfClient, print_response
13 |
14 | client = NfClient()
15 |
16 | # the "mac" below is the base64 encoding of the device's address bytes
17 | # (IP + port), e.g. base64.b64encode(bytes([10, 0, 1, 5, 0xba, 0xc0]))
17 |
18 | res = client.post("/api/v1/bacnet/scan", json={
19 | "parentId": 1244,
20 | "object": {
21 | "target": {
22 | "mac": "CmUoGbrA",
23 | "net": 30000,
24 | "adr": "Ag==",
25 | "maxApdu": 480,
26 | "deviceId": 300002,
27 | "bbmd": "",
28 | "portId": 0
29 | },
30 | "properties": ["PROP_OBJECT_NAME", "PROP_UNITS"],
31 | "objectTypes": [
32 | "OBJECT_BINARY_VALUE",
33 | "OBJECT_DEVICE"
34 | ],
35 | "ifMissing": "DELETE"
36 | },
37 | "autoImport": True,
38 | })
39 |
40 | print_response(res)
41 | scan_id = res.json()["id"]
42 |
43 | print("waiting for device scan to complete...")
44 | def wait_on_job(scan_id):
45 | for _ in range(0, 100):
46 | time.sleep(.1)
47 | res = client.get("/api/v1/bacnet/scan", params={
48 | "idFilter": scan_id,
49 | "full": True,
50 | })
51 | scan = res.json()
52 | if scan["results"][0]["status"] not in ["PENDING", "RUNNING"]:
53 | print_response(res)
54 | break
55 |
56 | wait_on_job(scan_id)
57 |
58 | # with autoImport set, the discovered objects have been imported into the object database
59 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v1/read-property-multiple-post.py:
--------------------------------------------------------------------------------
1 | """Perform a Read Property Multiple
2 |
3 | This is only supported in >= 2.1
4 | """
5 | import base64
6 | import sys
7 | sys.path.append("../..")
8 |
9 | from helpers import NfClient, print_response
10 |
11 | client = NfClient()
12 |
13 | # the IP + port of the device to read from, if not using dynamic binding
14 | # device_adr = [192,168,103,178,0xba, 0xc0]
15 |
16 | res = client.post("/api/v1/bacnet/readpropertymultiple", json={
17 | 'device_address':{
18 | 'device_id': 260001,
19 | #'mac': base64.b64encode(bytes(device_adr)).decode("ascii"),
20 | # use net and addr if a routed connection
21 | #'net': 0,
22 | #'adr':
23 | },
24 | 'read_properties': [
25 | {
26 | 'object_id': {
27 | 'object_type': "OBJECT_ANALOG_OUTPUT",
28 | 'instance': 1,
29 | },
30 | 'property_id': 'PROP_PRESENT_VALUE',
31 | 'array_index': 4294967295,
32 | },
33 | {
34 | 'object_id': {
35 | 'object_type': "OBJECT_ANALOG_OUTPUT",
36 | 'instance': 1,
37 | },
38 | 'property_id': 'PROP_PRIORITY_ARRAY',
39 | 'array_index': 4294967295,
40 | }
41 | ]
42 | })
43 |
44 | print_response(res)
45 |
46 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v1/read-property-multiple.py:
--------------------------------------------------------------------------------
1 | """Perform a Read Property Multiple
2 |
3 | Pre-2.1, the ReadProperty-Multiple endpoint accepted only GET requests,
4 | which means that all arguments must be sent as query params.
5 |
6 | This unfortunately means that only reading one item per request is
7 | possible.
8 | """
9 | import sys
10 | sys.path.append("../..")
11 |
12 | from helpers import NfClient, print_response
13 |
14 | client = NfClient()
15 |
16 | res = client.get("/api/v1/bacnet/readpropertymultiple", params={
17 | 'device_address.device_id': 260001,
18 | #'device_address.mac': base64.b64encode(bytes(device_adr)),
19 | # use net and addr if a routed connection
20 | #'device_address.net': 0,
21 | #'device_address.adr': "",
22 |
23 | "read_properties.object_id.object_type": "OBJECT_ANALOG_VALUE",
24 | "read_properties.object_id.instance": 1,
25 | "read_properties.property_id": "PROP_PRESENT_VALUE",
26 | "read_properties.array_index": 4294967295, # BACNET_ARRAY_ALL
27 |
28 | })
29 | print_response(res)
30 |
31 |
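32 | # On NF >= 2.1, prefer the POST endpoint, which can read several items in
33 | # one request (see read-property-multiple-post.py).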
--------------------------------------------------------------------------------
/examples/api/bacnet/v1/update-multi-state-value.py:
--------------------------------------------------------------------------------
1 | """Create A MultiState Value Object
2 |
3 | This example shows how to also create the State_Text property which
4 | defines the values of the enumeration.
5 | """
6 | import sys
7 | sys.path.append("../..")
8 |
9 | from helpers import NfClient, print_response
10 |
11 | client = NfClient()
12 |
13 | res = client.patch("/api/v1/bacnet/local", {
14 | "object_id": {
15 | "object_type": "OBJECT_MULTI_STATE_VALUE",
16 | "instance": 1, # create a new object
17 | },
18 | "props": [
19 | {
20 | "property": "PROP_PRESENT_VALUE",
21 | "value": {
22 | "enumerated": 1,
23 | },
24 | },
25 | ]
26 | })
27 | print_response(res)
28 |
29 |
30 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v1/write-property.py:
--------------------------------------------------------------------------------
1 | """Perform a BACnet write-property request
2 |
3 | Note array_index=4294967295 is used for "BACNET_ARRAY_ALL"
4 | """
5 |
6 | import base64
7 | import sys
8 | sys.path.append("../..")
9 |
10 | from helpers import NfClient, print_response
11 |
12 | client = NfClient()
13 |
14 | # device_adr = [192,168,103,178,0xba, 0xc0]
15 |
16 | res = client.post("/api/v1/bacnet/writeproperty", json={
17 | 'device_address': {
18 | 'device_id': 260001,
19 | # 'mac': base64.b64encode(bytes(device_adr)),
20 | # use net and addr if a routed connection
21 | #'net': 0,
22 | #'adr':
23 | },
24 | 'property':{
25 | 'object_id': {
26 | 'object_type': 'OBJECT_BINARY_VALUE',
27 | 'instance': 1,
28 | },
29 | 'property_id': 'PROP_PRESENT_VALUE',
30 | 'array_index':4294967295,
31 | },
32 | 'value':{
33 | 'enumerated': 1,
34 | },
35 | 'priority':12,
36 | })
37 | print_response(res)
38 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v2/read-property-multiple.py:
--------------------------------------------------------------------------------
1 | """ReadProperty-Multiple example of reading three properties from a single object.
2 | """
3 | import sys
4 | sys.path.append("../..")
5 |
6 | from helpers import NfClient, print_response
7 | client = NfClient()
8 |
9 | # device_adr = [8, 27,220, 10, 0xba, 0xc1]
10 |
11 | res = client.post("/api/v2/bacnet/confirmed-service", json={
12 | "device_address": {
13 | "deviceId": 260001,
14 | },
15 | "request": {
16 | "read_property_multiple": {
17 | "list_of_read_access_specifications": [
18 | {
19 | "object_identifier": {
20 | "object_type": "OBJECT_TYPE_TREND_LOG",
21 | "instance":1,
22 | },
23 | "list_of_property_references": [
24 | {
25 | "property_identifier": "PROPERTY_IDENTIFIER_UNITS",
26 | "property_array_index": 4294967295,
27 | },
28 | {
29 | "property_identifier": "PROPERTY_IDENTIFIER_TOTAL_RECORD_COUNT",
30 | "property_array_index": 4294967295,
31 | },
32 | {
33 | "property_identifier": "PROPERTY_IDENTIFIER_BUFFER_SIZE",
34 | "property_array_index": 4294967295,
35 | }
36 | ],
37 | }
38 | ]
39 | },
40 | }
41 | },)
42 | print_response(res)
43 |
44 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v2/read-property-schedule.py:
--------------------------------------------------------------------------------
1 | """Read a schedule object's Weekly Schedule property
2 |
3 | Example output:
4 | {
5 | "ack": {
6 | "readProperty": {
7 | "objectIdentifier": {
8 | "objectType": "OBJECT_TYPE_SCHEDULE",
9 | "instance": 1
10 | },
11 | "propertyIdentifier": "PROPERTY_IDENTIFIER_WEEKLY_SCHEDULE",
12 | "propertyArrayIndex": 4294967295,
13 | "propertyValue": {
14 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ListOfDailySchedule",
15 |       "listOfDailySchedule": [
16 |         {
17 |           "daySchedule": [
18 |             {}
19 |           ]
20 |         },
21 |         {
18 | "daySchedule": [
19 | {}
20 | ]
21 | },
22 | {
23 | "daySchedule": [
24 | {}
25 | ]
26 | },
27 | {
28 | "daySchedule": [
29 | {}
30 | ]
31 | },
32 | {
33 | "daySchedule": [
34 | {}
35 | ]
36 | },
37 | {
38 | "daySchedule": [
39 | {}
40 | ]
41 | },
42 | {
43 | "daySchedule": [
44 | {}
45 | ]
46 | }
47 | ]
48 | }
49 | }
50 | }
51 | }
52 | """
53 | import sys
54 | sys.path.append("../..")
55 |
56 | from helpers import NfClient, print_response
57 | client = NfClient()
58 |
59 | # device_adr = [192,168,103,178,0xba, 0xc0]
60 |
61 | res = client.post("/api/v2/bacnet/confirmed-service", json={
62 | "device_address": {
63 | "device_id": 260001,
64 | # "mac": base64.b64encode(bytes(device_adr)),
65 | },
66 | "request": {
67 | "read_property": {
68 | "object_identifier": {
69 | "object_type": "OBJECT_TYPE_SCHEDULE",
70 | "instance":1,
71 | },
72 | "property_identifier": "PROPERTY_IDENTIFIER_WEEKLY_SCHEDULE",
73 | "property_array_index": 4294967295,
74 | },
75 |
76 | }
77 | },)
78 | print_response(res)
79 |
80 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v2/read-property.py:
--------------------------------------------------------------------------------
1 | """ReadProperty example
2 |
3 | Read the units property of ai.1. Note the enumerated value is converted.
4 |
5 | {
6 | "ack": {
7 | "readProperty": {
8 | "objectIdentifier": {
9 | "objectType": "OBJECT_TYPE_ANALOG_INPUT",
10 | "instance": 1
11 | },
12 | "propertyIdentifier": "PROPERTY_IDENTIFIER_UNITS",
13 | "propertyArrayIndex": 4294967295,
14 | "propertyValue": {
15 | "@type": "type.googleapis.com/normalgw.bacnet.v2.EngineeringUnitsMessage",
16 | "engineeringUnits": "ENGINEERING_UNITS_PERCENT"
17 | }
18 | }
19 | }
20 | }
21 | """
22 | import sys
23 | sys.path.append("../..")
24 |
25 | from helpers import NfClient, print_response
26 | client = NfClient()
27 | # device_adr = [192,168,103,178,0xba, 0xc0]
28 |
29 | res = client.post("/api/v2/bacnet/confirmed-service", json={
30 | "device_address": {
31 | "device_id": 260001,
32 | # "mac": base64.b64encode(bytes(device_adr)),
33 | },
34 | "request": {
35 | "read_property": {
36 | "object_identifier": {
37 | "object_type": "OBJECT_TYPE_ANALOG_INPUT",
38 | "instance":1,
39 | },
40 | "property_identifier": "PROPERTY_IDENTIFIER_UNITS",
41 | "property_array_index": 4294967295,
42 | },
43 |
44 | }
45 | },)
46 | print_response(res)
47 |
48 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v2/read-range.py:
--------------------------------------------------------------------------------
1 | """ReadRange Example
2 |
3 | Read a trend log. Note for this example you may need to edit or remove
4 | FirstSequenceNumber in order to start reading the beginning of the log
5 |
6 | {
7 | "ack": {
8 | "readRange": {
9 | "objectIdentifier": {
10 | "objectType": "OBJECT_TYPE_TREND_LOG",
11 | "instance": 1
12 | },
13 | "propertyIdentifier": "PROPERTY_IDENTIFIER_LOG_BUFFER",
14 | "propertyArrayIndex": 0,
15 | "resultFlags": {
16 | "length": 3,
17 | "setBits": []
18 | },
19 | "itemCount": 5,
20 | "itemData": {
21 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ListOfLogRecord",
22 | "listOfLogRecord": [
23 | {
24 | "timestamp": {
25 | "date": {
26 | "year": 109,
27 | "month": 2,
28 | "day": 1,
29 | "wday": 7
30 | },
31 | "time": {
32 | "hour": 0,
33 | "minute": 15,
34 | "second": 0,
35 | "hundreth": 0
36 | }
37 | },
38 | "logDatum": {
39 | "realValue": 1001
40 | }
41 | },
42 | {
43 | "timestamp": {
44 | "date": {
45 | "year": 109,
46 | "month": 2,
47 | "day": 1,
48 | "wday": 7
49 | },
50 | "time": {
51 | "hour": 0,
52 | "minute": 30,
53 | "second": 0,
54 | "hundreth": 0
55 | }
56 | },
57 | "logDatum": {
58 | "realValue": 1002
59 | }
60 | },
61 | {
62 | "timestamp": {
63 | "date": {
64 | "year": 109,
65 | "month": 2,
66 | "day": 1,
67 | "wday": 7
68 | },
69 | "time": {
70 | "hour": 0,
71 | "minute": 45,
72 | "second": 0,
73 | "hundreth": 0
74 | }
75 | },
76 | "logDatum": {
77 | "realValue": 1003
78 | }
79 | },
80 | {
81 | "timestamp": {
82 | "date": {
83 | },
84 | "time": {
85 | "hour": 1,
86 | "minute": 0,
87 | "second": 0,
88 | "hundreth": 0
89 | }
90 | },
91 | "logDatum": {
92 | "realValue": 1004
93 | }
94 | },
95 | {
96 | "timestamp": {
97 | "date": {
98 | "year": 109,
99 | "month": 2,
100 | "day": 1,
101 | "wday": 7
102 | },
103 | "time": {
104 | "hour": 1,
105 | "minute": 15,
106 | "second": 0,
107 | "hundreth": 0
108 | }
109 | },
110 | "logDatum": {
111 | "realValue": 1005
112 | }
113 | }
114 | ]
115 | },
116 | "firstSequenceNumber": 9002
117 | }
118 | }
119 | }
120 |
121 | """
122 | import sys
123 | sys.path.append("../..")
124 |
125 | from helpers import NfClient, print_response
126 | client = NfClient()
127 |
128 | #device_adr = [8, 27,220, 10, 0xba, 0xc1]
129 |
130 | res = client.post("/api/v2/bacnet/confirmed-service", json={
131 | "device_address": {
132 | "deviceId": 260001,
133 | },
134 | "request": {
135 | "read_range": {
136 | "object_identifier": {
137 | "object_type": "OBJECT_TYPE_TREND_LOG",
138 | "instance":1,
139 | },
140 | "property_identifier": "PROPERTY_IDENTIFIER_LOG_BUFFER",
141 | "property_array_index": 4294967295,
142 | "by_position": {
143 | "reference_index": 1,
144 | "count": 5,
145 | }
146 | }
147 | }
148 | },)
149 | print_response(res)
150 |
151 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v2/write-property-schedule.py:
--------------------------------------------------------------------------------
1 | """Read and write a daily schedule object.
2 |
3 | NB: this may not work on many controllers, and successfully writing
4 | the schedule requires understanding what valid values are possible for
5 | each schedule event.
6 | """
7 | import sys
8 | sys.path.append("../..")
9 |
10 | from helpers import NfClient, print_response
11 | client = NfClient()
12 |
13 | #device_adr = [192,168,103,178,0xba, 0xc0]
14 |
15 | read_request = {
16 | "deviceAddress": {
17 | "deviceId": 260001
18 | # "mac": base64.b64encode(bytes(device_adr)),
19 | },
20 | "request": {
21 | "readProperty": {
22 | "objectIdentifier": {
23 | "objectType": "OBJECT_TYPE_SCHEDULE",
24 | "instance": 1
25 | },
26 | "propertyIdentifier": "PROPERTY_IDENTIFIER_WEEKLY_SCHEDULE",
27 | "propertyArrayIndex": 4294967295
28 | }
29 | }
30 | }
31 | res = client.post("/api/v2/bacnet/confirmed-service", json=read_request)
32 | print_response(res)
33 |
34 |
35 | # writeproperty
36 | write_request = {
37 | "deviceAddress": {
38 | # base64 IP + port for BACnet/ip
39 | # "mac": base64.b64encode(bytes(device_adr)),
40 | "deviceId": 260001,
41 | },
42 | "request": {
43 | "writeProperty": {
44 | "objectIdentifier": {
45 | "objectType": "OBJECT_TYPE_SCHEDULE",
46 | "instance": 1
47 | },
48 | "propertyIdentifier": "PROPERTY_IDENTIFIER_WEEKLY_SCHEDULE",
49 | "propertyArrayIndex": 2,
50 | "propertyValue": {
51 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ListOfDailySchedule",
52 | "listOfDailySchedule": [
53 | {
54 | "daySchedule": [
55 | {
56 | "time": {
57 | "hour": 6
58 | },
59 | "value": {
60 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
61 | "enumerated": 2
62 | }
63 | }
64 | ]
65 | }
66 | ]
67 | },
68 | "priority": 16
69 | }
70 | }
71 | }
72 |
73 | res = client.post("/api/v2/bacnet/confirmed-service", json=write_request)
74 | print_response(res)
75 |
--------------------------------------------------------------------------------
/examples/api/bacnet/v2/write-property.py:
--------------------------------------------------------------------------------
1 | """WriteProperty using an ApplicationDataValue type
2 | """
3 | import sys
4 | sys.path.append("../..")
5 |
6 | from helpers import NfClient, print_response
7 | client = NfClient()
8 |
9 | #device_adr = [192,168,103,178,0xba, 0xc0]
10 |
11 | res = client.post("/api/v2/bacnet/confirmed-service", json={
12 | "device_address": {
13 | "device_id": 260001,
14 | # "mac": base64.b64encode(bytes(device_adr)),
15 | },
16 | "request": {
17 | "write_property": {
18 | "object_identifier": {
19 | "object_type": "OBJECT_TYPE_ANALOG_OUTPUT",
20 | "instance":1,
21 | },
22 | "property_identifier": "PROPERTY_IDENTIFIER_PRESENT_VALUE",
23 | "property_array_index": 4294967295,
24 | "property_value": {
25 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
26 | "real": 1
27 | },
28 | "priority": 16,
29 | },
30 | }
31 | },)
32 | print_response(res)
33 |
--------------------------------------------------------------------------------
/examples/api/command/v2/get-commands.py:
--------------------------------------------------------------------------------
1 | """List currently running commands"""
2 |
3 | import sys
4 | sys.path.append("../..")
5 |
6 | from helpers import NfClient, print_response
7 |
8 | client = NfClient()
9 | res = client.get("/api/v2/command")
10 | print_response(res)
11 |
--------------------------------------------------------------------------------
/examples/api/command/v2/read.py:
--------------------------------------------------------------------------------
1 | """Read BACnet using the command service.
2 |
3 | Using the Command service instead of the BACnet service means that
4 | there is no need to manually construct the BACnet requests, or to
5 | split requests up by device.
6 |
7 | Example results:
8 | {
9 | "results": [
10 | {
11 | "point": {
12 | "uuid": "e88ad19c-718b-34ac-9911-f8186c0790aa",
13 | "layer": "hpl:bacnet:1"
14 | },
15 | "value": {
16 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
17 | "real": 0
18 | },
19 | "scalar": "0"
20 | },
21 | {
22 | "point": {
23 | "uuid": "163fc6f5-caeb-3cc8-8889-db41007094b9",
24 | "layer": "hpl:bacnet:1"
25 | },
26 | "value": {
27 | },
28 | {
29 | "point": {
30 | "uuid": "741362e5-f54a-3841-bc54-8cda81267479",
31 | "layer": "hpl:bacnet:1"
32 | },
33 | "value": {
34 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
35 | "real": 0
36 | },
37 | "scalar": "0"
38 | },
39 | {
40 | "point": {
41 | "uuid": "a7295987-af48-3998-a06d-4306989dbaff",
42 | "layer": "hpl:bacnet:1"
43 | },
44 | "value": {
45 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
46 | "real": 0
47 | },
48 | "scalar": "0"
49 | },
50 | {
51 | "point": {
52 | "uuid": "0837a003-dd13-37c5-b8f8-96b0344d4d34",
53 | "layer": "hpl:bacnet:1"
54 | },
55 | "value": {
56 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
57 | "unsigned": 1
58 | },
59 | "scalar": "1"
60 | }
61 | ],
62 | "errors": []
63 | }
64 |
65 | """
66 | import sys
67 | sys.path.append("../..")
68 |
69 | from helpers import NfClient, print_response
70 | client = NfClient()
71 |
72 | # we just use this to get some example points
73 | # this requires nf >= 2.1.1
74 | res = client.post("/api/v1/point/query", json={
75 | "structured_query": {
76 | "and": [
77 | {
78 | "field": {
79 | "property": "device_id",
80 | "text": "260001",
81 | }
82 | },
83 | {
84 | "or": [
85 | {
86 | "field": {
87 | "property": "type", "text": "OBJECT_ANALOG_INPUT",
88 | }
89 | }, {
90 | "field" : {
91 | "property": "type", "text": "OBJECT_ANALOG_VALUE",
92 | },
93 | },
94 | ],
95 | },
96 | ],
97 | },
98 | "layer": "hpl:bacnet:1",
99 | "page_size": "25",
100 | })
101 | uuids = list(map(lambda x: x["uuid"], res.json()["points"]))
102 |
103 | res = client.post("/api/v2/command/read", json={
104 | "reads": [ {
105 | "point": {
106 | "uuid": u,
107 | "layer": "hpl:bacnet:1",
108 | },
109 |
110 | # This is optional but you can use this to read a different BACnet property on the same object. This will generate an error if the underlying point is not really BACnet.
111 |
112 | #"bacnet_options": {
113 | # "property_identifier": "PROPERTY_IDENTIFIER_UNITS",
114 | #}
115 | } for u in uuids ],
116 | })
117 | print_response(res)
118 |
--------------------------------------------------------------------------------
/examples/api/command/v2/write-with-context.py:
--------------------------------------------------------------------------------
1 | """Write BACnet using the command service.
2 |
3 | Using the Command service instead of the BACnet service means that
4 | there is no need to manually construct the BACnet requests, or to
5 | split requests up by device.
6 |
7 | This example also uses a Command context for writes. When a command
8 | context is used, any writes made are relinquished automatically at the
9 | end of the context. The context ends when its lease time expires; or
10 | if manually cancelled.
11 |
12 | After running this program, get-commands will return the running
13 | command with writes; after 30 seconds, the writes will be
14 | relinquished.
15 |
16 | """
17 | import sys
18 | sys.path.append("../..")
19 |
20 | from helpers import NfClient, print_response
21 | client = NfClient()
22 |
23 | # we just use this to get some example points
24 | # this requires nf >= 2.1.1 for the structured query interface
25 | res = client.post("/api/v1/point/query", json={
26 | "structured_query": {
27 | "and": [
28 | {
29 | "field": {
30 | "property": "device_id",
31 | "numeric": { "min_value": 260001, "max_value": 260001 }
32 | }
33 | },
34 | {
35 | "field" : {
36 | "property": "type", "text": "OBJECT_ANALOG_VALUE",
37 | },
38 | },
39 | ],
40 | },
41 | "layer": "hpl:bacnet:1",
42 | "page_size": "15",
43 | })
44 | print ("{}: {}".format(res.status_code, res.headers.get("grpc-message")))
45 | uuids = list(map(lambda x: x["uuid"], res.json()["points"]))
46 |
47 | # get a command context id for this command. if the name is in use,
48 | # this will return an error instead of allowing you to create two
49 | # conflicting command contexts.
50 | cmd = client.post("/api/v2/command", json={
51 | "name": "test context",
52 | "duration": "30s",
53 | })
54 | command_context_id = (cmd.json())["id"]
55 |
56 | res = client.post( "/api/v2/command/write", json={
57 | "command_id": command_context_id,
58 | "writes": [ {
59 | "point": {
60 | "uuid": u,
61 | "layer": "hpl:bacnet:1",
62 | },
63 | "value": {
64 | # the value needs to be an ApplicationDataValue. However,
65 | # unlike the direct BACnet API, the command service
66 | # performs type conversion. Therefore (for instance) even
67 | # though we are writing to the present-value of an Analog
68 | # Value in this example (which has type real), you may
69 | # also send a double or unsigned here,
70 | "real": "50" if len(sys.argv) < 2 else float(sys.argv[1]),
71 | },
72 | } for u in uuids ],
73 | })
74 |
75 | print_response(res)
76 |
--------------------------------------------------------------------------------
/examples/api/command/v2/write.py:
--------------------------------------------------------------------------------
1 | """Write BACnet using the command service.
2 |
3 | Using the Command service instead of the BACnet service means that
4 | there is no need to manually construct the BACnet requests, or to
5 | split requests up by device.
6 |
7 | Example results:
8 | {
9 | "results": [
10 | {
11 | "point": {
12 | "uuid": "7b79ab76-4f31-32ce-810a-78731c62b6fc",
13 | "layer": "hpl:bacnet:1"
14 | },
15 | "value": {
16 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
17 | "real": 0
18 | },
19 | "scalar": "0"
20 | },
21 | {
22 | "point": {
23 | "uuid": "a70cd4bc-1579-370a-9ef2-e40f8e1784a3",
24 | "layer": "hpl:bacnet:1"
25 | },
26 | "value": {
27 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
28 | "real": 0
29 | },
30 | "scalar": "0"
31 | },
32 | {
33 | "point": {
34 | "uuid": "ad20c463-3300-3274-9a1e-7349bc2d4915",
35 | "layer": "hpl:bacnet:1"
36 | },
37 | "value": {
38 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
39 | "real": 0
40 | },
41 | "scalar": "0"
42 | },
43 | {
44 | "point": {
45 | "uuid": "ab8bc8ca-3b25-362e-820e-7dbce42da398",
46 | "layer": "hpl:bacnet:1"
47 | },
48 | "value": {
49 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
50 | "real": 0
51 | },
52 | "scalar": "0"
53 | },
54 | {
55 | "point": {
56 | "uuid": "48a79186-8dee-300a-9f99-b1cb7da627d3",
57 | "layer": "hpl:bacnet:1"
58 | },
59 | "value": {
60 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
61 | "real": 50
62 | },
63 | "scalar": "50"
64 | },
65 | {
66 | "point": {
67 | "uuid": "bd13cf0f-9aac-3117-8ddd-e174c06d2c96",
68 | "layer": "hpl:bacnet:1"
69 | },
70 | "value": {
71 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
72 | "real": 50
73 | },
74 | "scalar": "50"
75 | },
76 | {
77 | "point": {
78 | "uuid": "ee157a8b-e8e2-38c6-a7d5-c991a2963f93",
79 | "layer": "hpl:bacnet:1"
80 | },
81 | "value": {
82 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
83 | "real": 50
84 | },
85 | "scalar": "50"
86 | },
87 | {
88 | "point": {
89 | "uuid": "06a65dea-f8ef-3ccf-989c-2646b68d4270",
90 | "layer": "hpl:bacnet:1"
91 | },
92 | "value": {
93 | "@type": "type.googleapis.com/normalgw.bacnet.v2.ApplicationDataValue",
94 | "real": 50
95 | },
96 | "scalar": "50"
97 | }
98 | ],
99 | "errors": []
100 | }"""
101 | import sys
102 | sys.path.append("../..")
103 |
104 | from helpers import NfClient, print_response
105 | client = NfClient()
106 |
107 | # we just use this to get some example points
108 | # this requires nf >= 2.1.1 for the structured query interface
109 | res = client.post("/api/v1/point/query", json={
110 | "structured_query": {
111 | "and": [
112 | {
113 | "field": {
114 | "property": "device_id",
115 | "text": "260001"
116 | }
117 | },
118 | {
119 | "field" : {
120 | "property": "type", "text": "OBJECT_ANALOG_VALUE",
121 | },
122 | },
123 | ],
124 | },
125 | "layer": "hpl:bacnet:1",
126 | "page_size": "15",
127 | })
128 | print ("{}: {}".format(res.status_code, res.headers.get("grpc-message")))
129 | uuids = list(map(lambda x: x["uuid"], res.json()["points"]))
130 |
131 | res = client.post("/api/v2/command/write", json={
132 | "writes": [ {
133 | "point": {
134 | "uuid": u,
135 | "layer": "hpl:bacnet:1",
136 | },
137 | "value": {
138 | # the value needs to be an ApplicationDataValue. However,
139 | # unlike the direct BACnet API, the command service
140 | # performs type conversion. Therefore (for instance) even
141 | # though we are writing to the present-value of an Analog
142 | # Value in this example (which has type real), you may
143 | # also send a double or unsigned here.
144 | "real": 50.0 if len(sys.argv) < 2 else float(sys.argv[1]),
145 | },
146 | } for u in uuids ],
147 | })
148 |
149 | print_response(res)
150 |
--------------------------------------------------------------------------------
/examples/api/helpers.py:
--------------------------------------------------------------------------------
1 | """Example API wrapper
2 |
3 | This file implements a small requests wrapper and helpers shared by the API examples.
4 | """
5 | import sys
6 | import json
7 | import os
8 | import requests
9 | import argparse
10 | import base64
11 |
12 | class JsonOauth(requests.auth.AuthBase):
13 | def __init__(self, base, oauth=None, creds=None):
14 | self.token = None
15 | self.base = base
16 | self.oauth = oauth
17 | self.creds = creds
18 |
19 | def handle_401(self, r, **kwargs):
20 | """
21 | If auth is configured, we may need to acquire a token and
22 | retry the request with the Authorization header set.
23 | """
24 | r.content # drain the response body so the connection can be reused
25 | r.close()
26 |
27 |
28 | res = requests.post(self.base + "/api/v1/auth/token", json={
29 | "client_id": self.oauth[0],
30 | "client_secret": self.oauth[1],
31 | "grant_type": "client_credentials"
32 | })
33 |
34 | if res.status_code != 200:
35 | raise Exception("Invalid authentication: " + res.text)
36 |
37 | info = res.json()
38 | self.token = info.get("accessToken", info.get("access_token"))
39 |
40 | prep = r.request.copy()
41 | prep.headers["Authorization"] = "Bearer " + self.token
42 | _r = r.connection.send(prep, **kwargs)
43 | _r.history.append(r)
44 | _r.request = prep
45 | return _r
46 |
47 | def __call__(self, r):
48 | if self.oauth is not None:
49 | # if using token auth, we need to request an access token
50 | if self.token is None:
51 | r.register_hook("response", self.handle_401)
52 | else:
53 | r.headers["Authorization"] = "Bearer " + self.token
54 | elif self.creds is not None:
55 | # if using basic, just send the credentials
56 | r.headers["Authorization"] = "Basic " + base64.b64encode(self.creds.encode("utf-8")).decode("utf-8")
57 |
58 | return r
59 |
60 | class NfClient(object):
61 | """A simple wrapper for requests wihch adds authentication tokens"""
62 |
63 | def __init__(self):
64 | # pull the connection URL and so on from the environment
65 | parser = argparse.ArgumentParser("nf-client")
66 | parser.add_argument("--client-id", default=os.getenv("NF_CLIENT_ID"))
67 | parser.add_argument("--client-secret", default=os.getenv("NF_CLIENT_SECRET"))
68 | parser.add_argument("-u", "--user")
69 | parser.add_argument("--url", default=os.getenv("NFURL", "http://localhost:8080"))
70 | args, ignored = parser.parse_known_args()
71 |
72 |
73 | self.base = args.url.rstrip("/")
74 | self.auth = JsonOauth(self.base, oauth=(args.client_id, args.client_secret) if
75 | args.client_id and args.client_secret else None,
76 | creds=args.user)
77 |
78 | def get(self, path, *args, **kwargs):
79 | return requests.get(self.base + path, *args, auth=self.auth, **kwargs)
80 |
81 | def post(self, path, json={}, *args, **kwargs):
82 | return requests.post(self.base + path, *args, auth=self.auth, json=json, **kwargs)
83 |
84 | def patch(self, path, json={}, *args, **kwargs):
85 | return requests.patch(self.base + path, *args, auth=self.auth, json=json, **kwargs)
86 |
87 |
88 | def print_response(res):
89 | """Interpret the response from requests
90 | """
91 | if res.status_code == 200:
92 | json.dump(res.json(), sys.stdout, indent=2)
93 | print()#newline
94 | else:
95 | if "x-nf-unauthorized-reason" in res.headers:
96 | sys.stderr.write(res.headers.get("x-nf-unauthorized-reason") + "\n")
97 | sys.exit(1)
98 | else:
99 | # the headers should have more information for other errors
100 | print(res.headers)
101 |
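102 | # A minimal usage sketch (assumes an NF instance reachable at the URL in
103 | # $NFURL, defaulting to http://localhost:8080 as parsed above):
104 | if __name__ == "__main__":
105 |     client = NfClient()
106 |     res = client.post("/api/v1/point/query", json={"page_size": 1})
107 |     print_response(res)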
--------------------------------------------------------------------------------
/examples/api/hpl/v1/README.md:
--------------------------------------------------------------------------------
1 |
2 | Point API Examples
3 | ===
4 |
5 | The Point API provides a flexible interface for querying points in the
6 | system. This directory has examples of using it for a variety of
7 | use cases.
8 |
9 |
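10 | For example, here is a minimal structured query for the points on a
11 | single BACnet device (a sketch based on `get-points-device-id.py`; it
12 | assumes the `NfClient` helper from `examples/api/helpers.py` and a
13 | local NF instance):
14 |
15 | ```python
16 | import sys
17 | sys.path.append("../..")
18 |
19 | from helpers import NfClient, print_response
20 |
21 | client = NfClient()
22 | # numeric queries require the property to be indexed as NUMERIC in the layer
23 | res = client.post("/api/v1/point/query", json={
24 |     "page_size": 10,
25 |     "structured_query": {
26 |         "field": {
27 |             "property": "device_id",
28 |             "numeric": {"min_value": 260001, "max_value": 260001},
29 |         }
30 |     },
31 | })
32 | print_response(res)
33 | ```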
--------------------------------------------------------------------------------
/examples/api/hpl/v1/download-csv.py:
--------------------------------------------------------------------------------
1 | """Load 10 points from the object database
2 |
3 | The points will not have any particular order, since we don't provide a filter.
4 | """
5 |
6 | import sys
7 | sys.path.append("../..")
8 |
9 |
10 | from helpers import NfClient, print_response
11 |
12 | import requests
13 | import pandas as pd
14 | from datetime import datetime, timedelta, timezone
15 | from urllib.parse import urlencode
16 |
17 |
18 | def download_csv_from_api(
19 | client,
20 | uuids: list,
21 | headers: list,
22 | start: datetime,
23 | end: datetime
24 | ):
25 | if len(uuids) != len(headers):
26 | raise ValueError("Length of uuids and headers must match")
27 |
28 | uuid_to_header = dict(zip(uuids, headers))
29 | all_data = {}
30 |
31 | # Ensure times are UTC with Z suffix
32 | if start.tzinfo is None:
33 | start = start.replace(tzinfo=timezone.utc)
34 | else:
35 | start = start.astimezone(timezone.utc)
36 |
37 | if end.tzinfo is None:
38 | end = end.replace(tzinfo=timezone.utc)
39 | else:
40 | end = end.astimezone(timezone.utc)
41 |
42 | current = start
43 | while current < end:
44 | chunk_end = min(current + timedelta(days=1), end)
45 |
46 | params = {
47 | "uuids": uuids,
48 | "from": current.strftime('%Y-%m-%dT%H:%M:%SZ'),
49 | "to": chunk_end.strftime('%Y-%m-%dT%H:%M:%SZ'),
50 | "method": "FIRST",
51 | "window": '900s',
52 | }
53 |
54 | query = urlencode(params, doseq=True)
55 | url = f"/api/v1/point/data?{query}"
56 | resp = client.get(url)
57 |
58 | if resp.status_code != 200:
59 | raise RuntimeError(f"Failed to fetch data: {resp.status_code} - {resp.text}")
60 |
61 | json_data = resp.json()
62 | for series in json_data.get("data", []):
63 | uuid = series["uuid"]
64 | col_name = uuid_to_header.get(uuid, uuid)
65 | for entry in series.get("values", []):
66 | ts = entry["ts"]
67 | val = entry.get("double", None)
68 | all_data.setdefault(ts, {})[col_name] = val
69 |
70 | current = chunk_end
71 |
72 | # Create dataframe
73 | df = pd.DataFrame.from_dict(all_data, orient="index")
74 | df.index.name = "timestamp"
75 | df.sort_index(inplace=True)
76 |
77 | # Ensure all headers are present even if data was missing
78 | df = df.reindex(columns=headers)
79 |
80 | df.to_csv(sys.stdout, na_rep="")
81 |
82 |
83 |
84 | client = NfClient()
85 | res = client.post("/api/v1/point/query", json={
86 | "page_size": 500,
87 | "structuredQuery": {
88 | "and": [
89 | {
90 | "field": {
91 | "property": "equipRef",
92 | "text": "Diggs_RTU7"
93 | }
94 | },
95 | {
96 | "or": [
97 | {
98 | "field": {
99 | "property": "period",
100 | "numeric": {
101 | "minValue": 30,
102 | "maxValue": 30
103 | }
104 | }
105 | },
106 | {
107 | "field": {
108 | "property": "period",
109 | "numeric": {
110 | "minValue": 300,
111 | "maxValue": 300
112 | }
113 | }
114 | },
115 | {
116 | "field": {
117 | "property": "period",
118 | "numeric": {
119 | "minValue": 60,
120 | "maxValue": 60
121 | }
122 | }
123 | },
124 | {
125 | "field": {
126 | "property": "period",
127 | "numeric": {
128 | "minValue": 900,
129 | "maxValue": 900
130 | }
131 | }
132 | }
133 | ]
134 | }
135 | ]
136 | }})
137 |
138 |
139 | points = res.json()
140 | headers = [p["name"] for p in points["points"]]
141 | uuids = [p["uuid"] for p in points["points"]]
142 |
143 | download_csv_from_api(client, uuids, headers, start=datetime(2025, 5, 10), end=datetime(2025, 5, 16))
144 |
--------------------------------------------------------------------------------
/examples/api/hpl/v1/get-points-device-id-fieldmask.py:
--------------------------------------------------------------------------------
1 | """Load 10 points from the object database where device_id=260001,
2 | using a field mask to only return certain fields
3 | """
4 |
5 | import sys
6 | sys.path.append("../..")
7 | import requests
8 |
9 | from helpers import NfClient, print_response
10 |
11 | client = NfClient()
12 | res = client.post("/api/v1/point/query", json={
13 | "page_size": 10,
14 | "structured_query": {
15 | "field": {
16 | # property is the attribute name.
17 | "property": "device_id",
18 | # use a numeric query. For a numeric query to work, the
19 | # field has to be indexed as NUMERIC in the layer
20 | # definition.
21 | "numeric": {
22 | "min_value": 260001,
23 | "max_value": 260001
24 | },
25 | }
26 | },
27 | # masks limit which fields and attributes of each point are returned
28 | "masks": {
29 | # fields are attributes on the point object
30 | "field_mask": ["uuid", "latest_value"],
31 | # attr_include_mask whitelists certain attributes which
32 | # appear in layers
33 | "attr_include_mask": ["type", "instance"]
34 | }
35 | })
36 | print_response(res)
37 |
--------------------------------------------------------------------------------
/examples/api/hpl/v1/get-points-device-id.py:
--------------------------------------------------------------------------------
1 | """Load 10 points from the object database where device_id=260001
2 | """
3 |
4 | import sys
5 | sys.path.append("../..")
6 | import requests
7 |
8 | from helpers import NfClient, print_response
9 |
10 | client = NfClient()
11 | res = client.post("/api/v1/point/query", json={
12 | "page_size": 10,
13 | "structured_query": {
14 | "field": {
15 | # property is the attribute name.
16 | "property": "device_id",
17 | # use a numeric query. For a numeric query to work, the
18 | # field has to be indexed as NUMERIC in the layer
19 | # definition.
20 | "numeric": {
21 | "min_value": 260001,
22 | "max_value": 260001
23 | },
24 | }
25 | }
26 | })
27 | print_response(res)
28 |
--------------------------------------------------------------------------------
/examples/api/hpl/v1/get-points.py:
--------------------------------------------------------------------------------
1 | """Load 10 points from the object database
2 |
3 | The points will not have any particular order, since we don't provide a filter.
4 | """
5 |
6 | import sys
7 | sys.path.append("../..")
8 | import requests
9 |
10 | from helpers import NfClient, print_response
11 |
12 | client = NfClient()
13 | res = client.post("/api/v1/point/query", json={
14 | "page_size": 10
15 | })
16 | print_response(res)
17 |
18 |
--------------------------------------------------------------------------------
/examples/api/platform/v1/branding.py:
--------------------------------------------------------------------------------
1 | """Upload a file and set it as a logo
2 | This example demonstrates uploading an image file and setting it as one of the branding variables.
3 | Usage: `python branding.py <image-file> <env-var>`
4 | E.g. `python branding.py ~/assets/logo.png FAV_ICON_FILE`
5 | """
6 |
7 | import sys
8 | sys.path.append("../..")
9 | import base64
10 | from helpers import NfClient, print_response
11 |
12 | file = sys.argv[1]
13 | env_var = sys.argv[2]
14 |
15 | client = NfClient()
16 |
17 | # Upload the image
18 | with open(file, 'rb') as image_file:
19 | encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
20 |
21 | upload_res = client.post('/api/v1/upload',
22 | params={'name': image_file.name},
23 | json=encoded_string
24 | )
25 |
26 | print_response(upload_res)
27 | file_token = upload_res.json()["fileToken"]
28 |
29 | # Set the uploaded file as a configuration variable
30 | env_res = client.post('/api/v1/platform/env', json={
31 | "variables": [{
32 | "id": env_var, #"FAV_ICON_FILE",
33 | "is_advanced": False,
34 | "is_default": False,
35 | "is_empty": False,
36 | "file": {
37 | "file_name": file_token,
38 | }
39 | }]
40 | })
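41 |
42 | # report the result of setting the variable
43 | print_response(env_res)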
--------------------------------------------------------------------------------
/ha/00-deploy-system-dependencies.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Deploy system dependencies for installing normal framework
3 | hosts: all
4 | become: yes
5 | become_user: root
6 |
7 | tasks:
8 | - name: Remove snap version of docker if installed
9 | community.general.snap:
10 | name: docker
11 | state: absent
12 |
13 | - name: Install aptitude using apt
14 | apt: name=aptitude state=latest update_cache=yes force_apt_get=yes
15 |
16 | - name: Install required system packages
17 | apt: name={{ item }} state=latest update_cache=yes
18 | loop: [ 'apt-transport-https', 'ca-certificates', 'curl', 'software-properties-common', 'python3-pip', 'virtualenv', 'python3-setuptools', 'docker-compose', 'glusterfs-server' ]
19 |
20 | - name: Install Docker Module for Python
21 | pip:
22 | name: docker
23 |
24 | - name: Install Docker-compose Module for Python
25 | pip:
26 | name: docker-compose
27 |
28 | - name: Log into the Normal Azure container registry
29 | community.docker.docker_login:
30 | registry_url: normalframework.azurecr.io
31 | username: '{{ docker_username }}'
32 | password: '{{ docker_password }}'
33 |
--------------------------------------------------------------------------------
/ha/01-deploy-glusterfs.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Deploy a shared GlusterFS filesystem
3 | hosts: all
4 | become: yes
5 | become_user: root
6 | tasks:
7 | - name: Enable service glusterd
8 | ansible.builtin.service:
9 | name: glusterd
10 | enabled: yes
11 | state: started
12 |
13 | - name: Create a trusted storage pool
14 | run_once: true
15 | gluster_peer:
16 | state: present
17 | nodes: "{{ gluster_node_ips }}"
18 |
19 | - name: Create the brick storage directory
20 | ansible.builtin.file:
21 | path: /data/brick1
22 | state: directory
23 | mode: '0755'
24 |
25 | - name: Create the mount point for the gluster volume
26 | ansible.builtin.file:
27 | path: /data/nf
28 | state: directory
29 | mode: '0755'
30 |
31 | - name: Create the redis data directory
32 | ansible.builtin.file:
33 | path: /data/redis
34 | state: directory
35 | mode: '0755'
36 |
37 | - name: Create the gluster volume
38 | run_once: true
39 | gluster_volume:
40 | state: present
41 | replicas: 3
42 | name: volume0
43 | bricks: /data/brick1/store
44 | force: 'yes'
45 | cluster: "{{ gluster_node_ips }}"
46 |
47 | - name: Start the gluster volume
48 | run_once: true
49 | gluster_volume:
50 | state: started
51 | name: volume0
52 | cluster: "{{ gluster_node_ips }}"
53 |
54 | - name: Ensure Gluster volume is mounted on all nodes
55 | mount:
56 | name: "/var/nf"
57 | src: "localhost:volume0"
58 | fstype: glusterfs
59 | opts: "defaults,_netdev"
60 | state: mounted
61 |
62 | vars:
63 | gluster_node_ips:
64 | - '{{ hostvars["nfha-1"].ansible_host }}'
65 | - '{{ hostvars["nfha-2"].ansible_host }}'
66 | - '{{ hostvars["nfha-3"].ansible_host }}'
67 |
--------------------------------------------------------------------------------
/ha/02-deploy-normal.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Deploy Normal Framework in a HA configuration
3 | hosts: all
4 | become: yes
5 | become_user: root
6 | vars:
7 | redis_master: '{{ hostvars["nfha-1"]["ansible_host"] }}'
8 |
9 | tasks:
10 | - name: "Create /etc/nf"
11 | file:
12 | name: /etc/nf
13 | state: directory
14 | owner: root
15 | group: root
16 |
17 | - name: "Create promotion directory"
18 | file:
19 | name: /etc/nf/promote.d
20 | state: directory
21 | owner: root
22 | group: root
23 |
24 | - name: "Create demotion directory"
25 | file:
26 | name: /etc/nf/demote.d
27 | state: directory
28 | owner: root
29 | group: root
30 |
31 | - name: "Create promote script"
32 | ansible.builtin.copy:
33 | src: files/01-start-nf.sh
34 | dest: /etc/nf/promote.d/
35 | owner: root
36 | group: root
37 | mode: 0755
38 |
39 | - name: "Create demote script"
40 | ansible.builtin.copy:
41 | src: files/01-stop-nf.sh
42 | dest: /etc/nf/demote.d/
43 | owner: root
44 | group: root
45 | mode: 0755
46 |
47 | - name: "Deploy the NF/HA docker-compose file"
48 | template:
49 | src: files/docker-compose-2.1-redis-ha.yaml.j2
50 | dest: /etc/nf/docker-compose.yaml
51 |
52 | - name: "Deploy the redis-sentinel config file"
53 | template:
54 | src: files/sentinel.conf.j2
55 | dest: /etc/nf/sentinel.conf
56 |
57 | - name: "Deploy the redis client reconfig script"
58 | template:
59 | src: files/reconfigure.sh
60 | mode: 0755
61 | dest: /etc/nf/reconfigure.sh
62 |
63 | - name: Pull the latest docker containers
64 | community.docker.docker_compose:
65 | project_src: /etc/nf
66 | pull: yes
67 | recreate: always
68 | register: output
69 |
70 | - name: Manually run reconfigure to start NF on primary
71 | ansible.builtin.shell: /etc/nf/reconfigure.sh
72 |
--------------------------------------------------------------------------------
/ha/README.md:
--------------------------------------------------------------------------------
1 |
2 | High Availability Playbooks
3 | ===========================
4 |
5 | These instructions are tested on Ubuntu Server 22, but should be
6 | easy to adapt to other environments. They are designed to
7 | automate some of the more common administration tasks on an NF/HA
8 | cluster.
9 |
10 | 1. Make sure `/data/` is a persistent local filesystem on each node. The
11 | glusterfs bricks and redis data will be placed there.
12 |
13 | 2. Edit hosts.yml to provide the correct IP addresses and usernames to
14 | communicate with the NF nodes. Do not rename the nodes since they
15 | are used in configuration templates.
16 |
17 | 3. Execute `ansible-playbook -i hosts.yml 00-deploy-system-dependencies.yml --ask-become-pass`
18 | if needed to install system dependencies like docker and the required
19 | python modules for ansible.
20 |
21 | 4. Execute `ansible-playbook -i hosts.yml 01-deploy-glusterfs.yml
22 | --ask-become-pass` to configure the glusterfs shared storage, and mount
23 | on all nodes. After this step, `/var/nf` on all nodes should be a shared, replicated volume.
24 |
25 | 5. Execute `ansible-playbook -i hosts.yml 02-deploy-normal.yml --ask-become-pass`
26 | to deploy Normal. After this step
27 | has completed, you may want to inspect the redis server and redis
28 | sentinel services now running on each machine (see the check below). Normal will be running on `nfha-1` initially.
29 |
30 |
31 | Further steps include integrating with DNS or a load balancer for automated failover.
32 |
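33 | To check which node Sentinel currently considers the redis master, you can
34 | run the same query that `files/reconfigure.sh` uses:
35 | `docker exec nf_redis-sentinel_1 redis-cli -p 26379 SENTINEL GET-MASTER-ADDR-BY-NAME nfmaster`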
--------------------------------------------------------------------------------
/ha/files/01-start-nf.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | echo "Starting nf"
4 | curl -XPOST --unix-socket /var/run/docker.sock http://localhost/containers/nf_nf_1/start -o-
5 |
--------------------------------------------------------------------------------
/ha/files/01-stop-nf.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | echo "Stopping nf"
4 | curl -XPOST --unix-socket /var/run/docker.sock http://localhost/containers/nf_nf_1/stop -o-
5 |
6 |
--------------------------------------------------------------------------------
/ha/files/docker-compose-2.1-redis-ha.yaml.j2:
--------------------------------------------------------------------------------
1 | # ___ ___
2 | # /__/\ / /\
3 | # \ \:\ / /:/_
4 | # \ \:\ / /:/ /\
5 | # _____\__\:\ / /:/ /:/
6 | # /__/::::::::\ /__/:/ /:/
7 | # \ \:\~~\~~\/ \ \:\/:/
8 | # \ \:\ ~~~ \ \::/
9 | # \ \:\ \ \:\
10 | # \ \:\ \ \:\
11 | # \__\/ \__\/
12 | #
13 | #
14 |
15 | # Welcome to the NF example docker-compose. This file should stand up
16 | # a mostly functioning NF instance on your desktop. Before running
17 | # you should just check a few things:
18 | # 1. several containers need a writable volume, which this file puts in /tmp by default
19 | # 2. if the BACnet service fails to start, you may need to specify
20 | # which interface to use in its environment.
21 | #
22 | # This compose file runs entirely in a docker internal network, along
23 | # with a simple BACnet simulator.
24 |
25 | version: "2.2"
26 | services:
27 |
28 | # Most persistent data is in redis. This is essentially an
29 | # unmodified upstream with RediSearch and RedisTimeseries installed.
30 | redis:
31 | image: normalframework.azurecr.io/normalframework/redis:2.1
32 | command: redis-server /etc/redis.conf {% if ansible_hostname != "nfha-1" %} --replicaof {{ redis_master }} 6379 {% endif %}
33 |
34 | ports:
35 | - "6379:6379"
36 | volumes:
37 | - /data/redis:/data
38 | restart: unless-stopped
39 |
40 | redis-sentinel:
41 | image: normalframework.azurecr.io/normalframework/redis:2.1
42 | ports:
43 | - "26379:26379"
44 | command: redis-sentinel /etc/nf/sentinel.conf
45 | restart: unless-stopped
46 | volumes:
47 | - /etc/nf:/etc/nf
48 | - /var/run/docker.sock:/var/run/docker.sock
49 |
50 | # the main container which runs all the NF services, load balancer,
51 | # and management console.
52 | nf:
53 | image: normalframework.azurecr.io/normalframework/nf-full:2.1
54 | network_mode: host
55 | depends_on:
56 | - redis
57 | volumes:
58 | - "/var/nf:/var/nf"
59 | tmpfs:
60 | - /run:exec
61 | - /etc/nginx/conf.d/
62 | restart: unless-stopped
63 |
--------------------------------------------------------------------------------
/ha/files/reconfigure.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -eq 0 ]; then
4 | TO_IP=$(docker exec nf_redis-sentinel_1 redis-cli -p 26379 SENTINEL GET-MASTER-ADDR-BY-NAME nfmaster | head -n1)
5 | else
6 | TO_IP=$6 # sentinel calls this script with <master-name> <role> <state> <from-ip> <from-port> <to-ip> <to-port>; $6 is the new master IP
7 | fi
8 | echo "Client reconfiguration; new master is $TO_IP"
9 |
10 | if [ "$TO_IP" = "{{ ansible_host }}" ]; then
11 | run-parts /etc/nf/promote.d/ --regex '^.*$'
12 | else
13 | run-parts /etc/nf/demote.d/ --regex '^.*$'
14 | fi
15 |
16 | exit 0
17 |
--------------------------------------------------------------------------------
/ha/files/sentinel.conf.j2:
--------------------------------------------------------------------------------
1 | sentinel monitor nfmaster {{hostvars["nfha-1"]["ansible_host"]}} 6379 2
2 | sentinel down-after-milliseconds nfmaster 60000
3 | sentinel failover-timeout nfmaster 180000
4 | sentinel parallel-syncs nfmaster 1
5 | sentinel client-reconfig-script nfmaster /etc/nf/reconfigure.sh
6 | # needed: otherwise sentinel advertises its docker-internal IP, which is not reachable from the other nodes
7 | sentinel announce-ip {{ ansible_host }}
8 |
--------------------------------------------------------------------------------
/ha/hosts.yml:
--------------------------------------------------------------------------------
1 |
2 | nodes:
3 | hosts:
4 | # nfha-1 will be the initial primary redis master
5 | nfha-1:
6 | ansible_host: 192.168.103.248
7 | ansible_user: sdhags
8 | nfha-2:
9 | ansible_host: 192.168.103.150
10 | ansible_user: stevedh
11 | nfha-3:
12 | ansible_host: 192.168.103.217
13 | ansible_user: stevedh
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/iotedge/manifest-1.4.json:
--------------------------------------------------------------------------------
1 | {
2 | "modulesContent": {
3 | "$edgeAgent": {
4 | "properties.desired": {
5 | "schemaVersion": "1.0",
6 | "runtime": {
7 | "type": "docker",
8 | "settings": {
9 | "minDockerVersion": "v1.25",
10 | "loggingOptions": "",
11 | "registryCredentials": {
12 | "normalframework": {
13 | "username": "normalframework",
14 | "password": "",
15 | "address": "normalframework.azurecr.io"
16 | }
17 | }
18 | }
19 | },
20 | "systemModules": {
21 | "edgeAgent": {
22 | "type": "docker",
23 | "settings": {
24 | "image": "mcr.microsoft.com/azureiotedge-agent:1.2",
25 | "createOptions": ""
26 | }
27 | },
28 | "edgeHub": {
29 | "type": "docker",
30 | "status": "running",
31 | "restartPolicy": "always",
32 | "settings": {
33 | "image": "mcr.microsoft.com/azureiotedge-hub:1.2",
34 | "createOptions": "{\"HostConfig\":{\"PortBindings\":{\"5671/tcp\":[{\"HostPort\":\"5671\"}], \"8883/tcp\":[{\"HostPort\":\"8883\"}],\"443/tcp\":[{\"HostPort\":\"443\"}]}}}"
35 | },
36 | "env": {
37 | "SslProtocols": {
38 | "value": "tls1.2"
39 | }
40 | }
41 | }
42 | },
43 | "modules": {
44 | "nf": {
45 | "version": "1.0",
46 | "type": "docker",
47 | "status": "running",
48 | "restartPolicy": "always",
49 | "settings": {
50 | "image": "normalframework.azurecr.io/normalframework/nf-full:1.4",
51 | "createOptions": "{\"NetworkingConfig\": {\"EndpointsConfig\": {\"host\": {}}}, \"HostConfig\": {\"NetworkMode\": \"host\", \"CapDrop\": []}}"
52 |
53 | }
54 | },
55 | "redis": {
56 | "version": "1.0",
57 | "type": "docker",
58 | "status": "running",
59 | "restartPolicy": "always",
60 | "settings": {
61 | "image": "normalframework.azurecr.io/normalframework/redis:1.4",
62 | "createOptions": "{\"HostConfig\": {\"CapDrop\": [], \"PortBindings\":{\"6379/tcp\":[{\"HostPort\":\"6379\"}]} }}"
63 | }
64 | }
65 | }
66 | }
67 | },
68 | "$edgeHub": {
69 | "properties.desired": {
70 | "schemaVersion": "1.0",
71 | "routes": {
72 | },
73 | "storeAndForwardConfiguration": {
74 | "timeToLiveSecs": 7200
75 | }
76 | }
77 | }
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/logo_nf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/normalframework/nf-sdk/7aca48850a7ca03308dab275ab86a742836440bf/logo_nf.png
--------------------------------------------------------------------------------
/solutions/idl/assets/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/normalframework/nf-sdk/7aca48850a7ca03308dab275ab86a742836440bf/solutions/idl/assets/favicon.ico
--------------------------------------------------------------------------------
/solutions/idl/assets/logo.svg:
--------------------------------------------------------------------------------
1 |
10 |
--------------------------------------------------------------------------------
/solutions/idl/dashboards/normal/equipment-timeseries.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": {
7 | "type": "grafana",
8 | "uid": "-- Grafana --"
9 | },
10 | "enable": true,
11 | "hide": true,
12 | "iconColor": "rgba(0, 211, 255, 1)",
13 | "name": "Annotations & Alerts",
14 | "type": "dashboard"
15 | }
16 | ]
17 | },
18 | "editable": true,
19 | "fiscalYearStartMonth": 0,
20 | "graphTooltip": 0,
21 | "id": 3,
22 | "links": [],
23 | "panels": [
24 | {
25 | "datasource": {
26 | "type": "grafana-postgresql-datasource",
27 | "uid": "normal-timescaledb"
28 | },
29 | "fieldConfig": {
30 | "defaults": {
31 | "color": {
32 | "mode": "palette-classic"
33 | },
34 | "custom": {
35 | "axisBorderShow": false,
36 | "axisCenteredZero": false,
37 | "axisColorMode": "text",
38 | "axisLabel": "",
39 | "axisPlacement": "auto",
40 | "barAlignment": 0,
41 | "barWidthFactor": 0.6,
42 | "drawStyle": "line",
43 | "fillOpacity": 0,
44 | "gradientMode": "none",
45 | "hideFrom": {
46 | "legend": false,
47 | "tooltip": false,
48 | "viz": false
49 | },
50 | "insertNulls": false,
51 | "lineInterpolation": "linear",
52 | "lineWidth": 1,
53 | "pointSize": 5,
54 | "scaleDistribution": {
55 | "type": "linear"
56 | },
57 | "showPoints": "auto",
58 | "spanNulls": false,
59 | "stacking": {
60 | "group": "A",
61 | "mode": "none"
62 | },
63 | "thresholdsStyle": {
64 | "mode": "off"
65 | }
66 | },
67 | "mappings": [],
68 | "thresholds": {
69 | "mode": "absolute",
70 | "steps": [
71 | {
72 | "color": "green",
73 | "value": null
74 | }
75 | ]
76 | }
77 | },
78 | "overrides": []
79 | },
80 | "gridPos": {
81 | "h": 10,
82 | "w": 24,
83 | "x": 0,
84 | "y": 0
85 | },
86 | "id": 1,
87 | "options": {
88 | "legend": {
89 | "calcs": [],
90 | "displayMode": "list",
91 | "placement": "bottom",
92 | "showLegend": true
93 | },
94 | "tooltip": {
95 | "hideZeros": false,
96 | "mode": "single",
97 | "sort": "none"
98 | }
99 | },
100 | "pluginVersion": "11.5.1",
101 | "targets": [
102 | {
103 | "datasource": {
104 | "type": "grafana-postgresql-datasource",
105 | "uid": "normal-timescaledb"
106 | },
107 | "editorMode": "code",
108 | "format": "table",
109 | "rawQuery": true,
110 | "rawSql": "SELECT\n lm.\"equipRef\" || ' - ' || p.name,\n d.value as \"Value\",\n $__time(d.time)\nFROM\n \"data\" d\n JOIN points p ON d.point_id = p.id\n join layer_model lm on p.id = lm.point_id\n JOIN equipment_type et ON lm.\"equipTypeId\" = et.id\nWHERE\n $__timeFilter(d.time)\n and (\n lm.\"equipRef\" IN ($equip)\n or '${equip:csv}' = ''''\n )\n and (\n et.name = '$equipType'\n or '$equipType' = ''\n )\n and lm.class not is null\n and (\n p.name IN ($objectName)\n or '${objectName:csv}' = ''''\n )\norder by\n time",
111 | "refId": "A",
112 | "sql": {
113 | "columns": [
114 | {
115 | "parameters": [],
116 | "type": "function"
117 | }
118 | ],
119 | "groupBy": [
120 | {
121 | "property": {
122 | "type": "string"
123 | },
124 | "type": "groupBy"
125 | }
126 | ],
127 | "limit": 50
128 | }
129 | }
130 | ],
131 | "title": "Tagged Points",
132 | "transformations": [
133 | {
134 | "id": "prepareTimeSeries",
135 | "options": {
136 | "format": "multi"
137 | }
138 | }
139 | ],
140 | "type": "timeseries"
141 | },
142 | {
143 | "datasource": {
144 | "type": "grafana-postgresql-datasource",
145 | "uid": "normal-timescaledb"
146 | },
147 | "fieldConfig": {
148 | "defaults": {
149 | "color": {
150 | "mode": "palette-classic"
151 | },
152 | "custom": {
153 | "axisBorderShow": false,
154 | "axisCenteredZero": false,
155 | "axisColorMode": "text",
156 | "axisLabel": "",
157 | "axisPlacement": "auto",
158 | "barAlignment": 0,
159 | "barWidthFactor": 0.6,
160 | "drawStyle": "line",
161 | "fillOpacity": 0,
162 | "gradientMode": "none",
163 | "hideFrom": {
164 | "legend": false,
165 | "tooltip": false,
166 | "viz": false
167 | },
168 | "insertNulls": false,
169 | "lineInterpolation": "linear",
170 | "lineWidth": 1,
171 | "pointSize": 5,
172 | "scaleDistribution": {
173 | "type": "linear"
174 | },
175 | "showPoints": "auto",
176 | "spanNulls": false,
177 | "stacking": {
178 | "group": "A",
179 | "mode": "none"
180 | },
181 | "thresholdsStyle": {
182 | "mode": "off"
183 | }
184 | },
185 | "mappings": [],
186 | "thresholds": {
187 | "mode": "absolute",
188 | "steps": [
189 | {
190 | "color": "green",
191 | "value": null
192 | }
193 | ]
194 | }
195 | },
196 | "overrides": []
197 | },
198 | "gridPos": {
199 | "h": 10,
200 | "w": 24,
201 | "x": 0,
202 | "y": 10
203 | },
204 | "id": 2,
205 | "options": {
206 | "legend": {
207 | "calcs": [],
208 | "displayMode": "list",
209 | "placement": "bottom",
210 | "showLegend": true
211 | },
212 | "tooltip": {
213 | "hideZeros": false,
214 | "mode": "single",
215 | "sort": "none"
216 | }
217 | },
218 | "pluginVersion": "11.5.1",
219 | "targets": [
220 | {
221 | "datasource": {
222 | "type": "grafana-postgresql-datasource",
223 | "uid": "normal-timescaledb"
224 | },
225 | "editorMode": "code",
226 | "format": "table",
227 | "rawQuery": true,
228 | "rawSql": "SELECT\n lm.\"equipRef\" || ' - ' || p.name,\n d.value as \"Value\",\n $__time(d.time)\nFROM\n \"data\" d\n JOIN points p ON d.point_id = p.id\n join layer_model lm on p.id = lm.point_id\n JOIN equipment_type et ON lm.\"equipTypeId\" = et.id\nWHERE\n $__timeFilter(d.time)\n and (lm.\"equipRef\" IN ($equip) or '${equip:csv}' = '''')\n and (et.name = '$equipType' or '$equipType' = '')\n and lm.class is null\n and (p.name IN ($objectName) or '${objectName:csv}' = '''')\norder by\n time",
229 | "refId": "A",
230 | "sql": {
231 | "columns": [
232 | {
233 | "parameters": [],
234 | "type": "function"
235 | }
236 | ],
237 | "groupBy": [
238 | {
239 | "property": {
240 | "type": "string"
241 | },
242 | "type": "groupBy"
243 | }
244 | ],
245 | "limit": 50
246 | }
247 | }
248 | ],
249 | "title": "Untagged Points",
250 | "transformations": [
251 | {
252 | "id": "prepareTimeSeries",
253 | "options": {
254 | "format": "multi"
255 | }
256 | }
257 | ],
258 | "type": "timeseries"
259 | }
260 | ],
261 | "preload": false,
262 | "schemaVersion": 40,
263 | "tags": [],
264 | "templating": {
265 | "list": [
266 | {
267 | "current": {},
268 | "datasource": {
269 | "type": "grafana-postgresql-datasource",
270 | "uid": "normal-timescaledb"
271 | },
272 | "definition": "SELECT distinct \"name\" FROM equipment_type",
273 | "includeAll": false,
274 | "label": "Equip Type",
275 | "name": "equipType",
276 | "options": [],
277 | "query": "SELECT distinct \"name\" FROM equipment_type",
278 | "refresh": 1,
279 | "regex": "",
280 | "sort": 1,
281 | "type": "query"
282 | },
283 | {
284 | "allValue": "''",
285 | "current": {
286 | "text": [
287 | "All"
288 | ],
289 | "value": [
290 | "$__all"
291 | ]
292 | },
293 | "datasource": {
294 | "type": "grafana-postgresql-datasource",
295 | "uid": "normal-timescaledb"
296 | },
297 | "definition": "SELECT distinct mm.\"equipRef\" FROM layer_model mm\n join equipment_type et on mm.\"equipTypeId\" = et.id\n where mm.\"equipRef\" <> '' and (et.name = '$equipType' or '$equipType' = '')",
298 | "includeAll": true,
299 | "label": "Equip",
300 | "multi": true,
301 | "name": "equip",
302 | "options": [],
303 | "query": "SELECT distinct mm.\"equipRef\" FROM layer_model mm\n join equipment_type et on mm.\"equipTypeId\" = et.id\n where mm.\"equipRef\" <> '' and (et.name = '$equipType' or '$equipType' = '')",
304 | "refresh": 1,
305 | "regex": "",
306 | "type": "query"
307 | },
308 | {
309 | "allValue": "''",
310 | "current": {
311 | "text": [
312 | "All"
313 | ],
314 | "value": [
315 | "$__all"
316 | ]
317 | },
318 | "datasource": {
319 | "type": "grafana-postgresql-datasource",
320 | "uid": "normal-timescaledb"
321 | },
322 | "definition": "SELECT distinct lm.class\nFROM layer_model lm\n join equipment_type et on lm.\"equipTypeId\" = et.id\nwhere (et.name = '$equipType' or '$equipType' = '')\n and (lm.\"equipRef\" IN ($equip) or '${equip:csv}' = '''')\n and lm.class is not null and lm.class <> ''",
323 | "includeAll": true,
324 | "label": "Class",
325 | "multi": true,
326 | "name": "class",
327 | "options": [],
328 | "query": "SELECT distinct lm.class\nFROM layer_model lm\n join equipment_type et on lm.\"equipTypeId\" = et.id\nwhere (et.name = '$equipType' or '$equipType' = '')\n and (lm.\"equipRef\" IN ($equip) or '${equip:csv}' = '''')\n and lm.class is not null and lm.class <> ''",
329 | "refresh": 1,
330 | "regex": "",
331 | "type": "query"
332 | },
333 | {
334 | "allValue": "''",
335 | "current": {
336 | "text": [
337 | "All"
338 | ],
339 | "value": [
340 | "$__all"
341 | ]
342 | },
343 | "datasource": {
344 | "type": "grafana-postgresql-datasource",
345 | "uid": "normal-timescaledb"
346 | },
347 | "definition": "SELECT DISTINCT p.name\nFROM points p\n JOIN layer_model lm ON lm.point_id = p.id \n JOIN equipment_type et ON lm.\"equipTypeId\" = et.id\nWHERE (lm.\"equipRef\" IN ($equip) or '${equip:csv}' = '''')\n and (et.name = '$equipType' or '$equipType' = '')",
348 | "includeAll": true,
349 | "label": "Object Name",
350 | "multi": true,
351 | "name": "objectName",
352 | "options": [],
353 | "query": "SELECT DISTINCT p.name\nFROM points p\n JOIN layer_model lm ON lm.point_id = p.id \n JOIN equipment_type et ON lm.\"equipTypeId\" = et.id\nWHERE (lm.\"equipRef\" IN ($equip) or '${equip:csv}' = '''')\n and (et.name = '$equipType' or '$equipType' = '')",
354 | "refresh": 1,
355 | "regex": "",
356 | "type": "query"
357 | },
358 | {
359 | "current": {
360 | "text": "Class",
361 | "value": "mm.class"
362 | },
363 | "includeAll": false,
364 | "label": "Label",
365 | "name": "label",
366 | "options": [
367 | {
368 | "selected": true,
369 | "text": "Class",
370 | "value": "mm.class"
371 | },
372 | {
373 | "selected": false,
374 | "text": "Object Name",
375 | "value": "pm.name"
376 | }
377 | ],
378 | "query": "Class : lm.class,Object Name : p.name",
379 | "type": "custom"
380 | }
381 | ]
382 | },
383 | "time": {
384 | "from": "now-6h",
385 | "to": "now"
386 | },
387 | "timepicker": {},
388 | "timezone": "browser",
389 | "title": "Equipment TimeSeries",
390 | "uid": "fdu0bcp6imrr4b",
391 | "version": 1,
392 | "weekStart": ""
393 | }
--------------------------------------------------------------------------------
/solutions/idl/dashboards/normal/home.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": {
7 | "type": "grafana",
8 | "uid": "-- Grafana --"
9 | },
10 | "enable": true,
11 | "hide": true,
12 | "iconColor": "rgba(0, 211, 255, 1)",
13 | "name": "Annotations & Alerts",
14 | "type": "dashboard"
15 | }
16 | ]
17 | },
18 | "editable": true,
19 | "fiscalYearStartMonth": 0,
20 | "graphTooltip": 0,
21 | "id": 3,
22 | "links": [],
23 | "panels": [
24 | {
25 | "collapsed": false,
26 | "gridPos": {
27 | "h": 1,
28 | "w": 24,
29 | "x": 0,
30 | "y": 0
31 | },
32 | "id": 5,
33 | "panels": [],
34 | "title": "Main",
35 | "type": "row"
36 | },
37 | {
38 | "datasource": {
39 | "type": "grafana-postgresql-datasource",
40 | "uid": "normal-timescaledb"
41 | },
42 | "fieldConfig": {
43 | "defaults": {
44 | "color": {
45 | "mode": "palette-classic"
46 | },
47 | "mappings": [],
48 | "thresholds": {
49 | "mode": "absolute",
50 | "steps": [
51 | {
52 | "color": "green",
53 | "value": null
54 | }
55 | ]
56 | }
57 | },
58 | "overrides": []
59 | },
60 | "gridPos": {
61 | "h": 6,
62 | "w": 12,
63 | "x": 0,
64 | "y": 1
65 | },
66 | "id": 3,
67 | "options": {
68 | "minVizHeight": 75,
69 | "minVizWidth": 75,
70 | "orientation": "auto",
71 | "reduceOptions": {
72 | "calcs": [
73 | "lastNotNull"
74 | ],
75 | "fields": "",
76 | "values": false
77 | },
78 | "showThresholdLabels": false,
79 | "showThresholdMarkers": false,
80 | "sizing": "auto"
81 | },
82 | "pluginVersion": "11.5.0",
83 | "targets": [
84 | {
85 | "datasource": {
86 | "type": "grafana-postgresql-datasource",
87 | "uid": "normal-timescaledb"
88 | },
89 | "editorMode": "code",
90 | "format": "table",
91 | "rawQuery": true,
92 | "rawSql": "SELECT p.count as \"Total\", t.count as \"Trended\", e.count as \"Equips\", m.count as \"Labeled\"\nFROM (SELECT COUNT(*)\n FROM points) as p\n CROSS JOIN (SELECT COUNT(*)\n FROM points\n where period <> '0 years 0 mons 0 days 0 hours 0 mins 0.0 secs') as t\n CROSS JOIN (SELECT COUNT(*)\n FROM layer_model lm\n join points p on p.id = lm.point_id\n WHERE lm.class IS NOT NULL\n and p.point_type = 'POINT') as m\n CROSS JOIN (SELECT COUNT(*)\n FROM points\n where point_type = 'EQUIPMENT') as e\n",
93 | "refId": "A",
94 | "sql": {
95 | "columns": [
96 | {
97 | "parameters": [],
98 | "type": "function"
99 | }
100 | ],
101 | "groupBy": [
102 | {
103 | "property": {
104 | "type": "string"
105 | },
106 | "type": "groupBy"
107 | }
108 | ],
109 | "limit": 50
110 | }
111 | }
112 | ],
113 | "title": "Points",
114 | "type": "gauge"
115 | },
116 | {
117 | "datasource": {
118 | "type": "grafana-postgresql-datasource",
119 | "uid": "normal-timescaledb"
120 | },
121 | "description": "",
122 | "fieldConfig": {
123 | "defaults": {
124 | "color": {
125 | "mode": "palette-classic"
126 | },
127 | "custom": {
128 | "hideFrom": {
129 | "legend": false,
130 | "tooltip": false,
131 | "viz": false
132 | }
133 | },
134 | "fieldMinMax": false,
135 | "mappings": []
136 | },
137 | "overrides": []
138 | },
139 | "gridPos": {
140 | "h": 14,
141 | "w": 12,
142 | "x": 12,
143 | "y": 1
144 | },
145 | "id": 4,
146 | "options": {
147 | "displayLabels": [],
148 | "legend": {
149 | "displayMode": "table",
150 | "placement": "bottom",
151 | "showLegend": true,
152 | "values": [
153 | "value",
154 | "percent"
155 | ]
156 | },
157 | "pieType": "donut",
158 | "reduceOptions": {
159 | "calcs": [
160 | "lastNotNull"
161 | ],
162 | "fields": "",
163 | "limit": 1,
164 | "values": true
165 | },
166 | "tooltip": {
167 | "hideZeros": false,
168 | "mode": "single",
169 | "sort": "none"
170 | }
171 | },
172 | "pluginVersion": "11.5.0",
173 | "targets": [
174 | {
175 | "datasource": {
176 | "type": "grafana-postgresql-datasource",
177 | "uid": "normal-timescaledb"
178 | },
179 | "editorMode": "code",
180 | "format": "table",
181 | "rawQuery": true,
182 | "rawSql": "SELECT device_prop_vendor_name as metric, count(*) as value FROM layer_hpl_bacnet_1\nWHERE type = 'OBJECT_DEVICE'\nGROUP BY device_prop_vendor_name",
183 | "refId": "A",
184 | "sql": {
185 | "columns": [
186 | {
187 | "parameters": [],
188 | "type": "function"
189 | }
190 | ],
191 | "groupBy": [
192 | {
193 | "property": {
194 | "type": "string"
195 | },
196 | "type": "groupBy"
197 | }
198 | ],
199 | "limit": 50
200 | }
201 | }
202 | ],
203 | "title": "Devices",
204 | "type": "piechart"
205 | },
206 | {
207 | "fieldConfig": {
208 | "defaults": {},
209 | "overrides": []
210 | },
211 | "gridPos": {
212 | "h": 8,
213 | "w": 12,
214 | "x": 0,
215 | "y": 7
216 | },
217 | "id": 7,
218 | "options": {
219 | "folderUID": "normal",
220 | "includeVars": false,
221 | "keepTime": false,
222 | "maxItems": 10,
223 | "query": "",
224 | "showFolderNames": true,
225 | "showHeadings": false,
226 | "showRecentlyViewed": false,
227 | "showSearch": true,
228 | "showStarred": false,
229 | "tags": []
230 | },
231 | "pluginVersion": "11.5.0",
232 | "title": "Dashboards",
233 | "type": "dashlist"
234 | },
235 | {
236 | "collapsed": false,
237 | "gridPos": {
238 | "h": 1,
239 | "w": 24,
240 | "x": 0,
241 | "y": 15
242 | },
243 | "id": 6,
244 | "panels": [],
245 | "title": "Equipment Schedule",
246 | "type": "row"
247 | },
248 | {
249 | "datasource": {
250 | "type": "grafana-postgresql-datasource",
251 | "uid": "normal-timescaledb"
252 | },
253 | "fieldConfig": {
254 | "defaults": {
255 | "color": {
256 | "mode": "continuous-BlPu"
257 | },
258 | "custom": {
259 | "align": "auto",
260 | "cellOptions": {
261 | "type": "auto"
262 | },
263 | "inspect": false
264 | },
265 | "fieldMinMax": false,
266 | "mappings": [],
267 | "thresholds": {
268 | "mode": "absolute",
269 | "steps": [
270 | {
271 | "color": "green",
272 | "value": null
273 | }
274 | ]
275 | }
276 | },
277 | "overrides": [
278 | {
279 | "matcher": {
280 | "id": "byName",
281 | "options": "0 Classes"
282 | },
283 | "properties": [
284 | {
285 | "id": "custom.cellOptions",
286 | "value": {
287 | "mode": "gradient",
288 | "type": "gauge",
289 | "valueDisplayMode": "color"
290 | }
291 | }
292 | ]
293 | },
294 | {
295 | "matcher": {
296 | "id": "byName",
297 | "options": "1-5 Classes"
298 | },
299 | "properties": [
300 | {
301 | "id": "custom.cellOptions",
302 | "value": {
303 | "mode": "gradient",
304 | "type": "gauge",
305 | "valueDisplayMode": "text"
306 | }
307 | }
308 | ]
309 | },
310 | {
311 | "matcher": {
312 | "id": "byName",
313 | "options": "6+ Classes"
314 | },
315 | "properties": [
316 | {
317 | "id": "custom.cellOptions",
318 | "value": {
319 | "mode": "gradient",
320 | "type": "gauge",
321 | "valueDisplayMode": "text"
322 | }
323 | }
324 | ]
325 | }
326 | ]
327 | },
328 | "gridPos": {
329 | "h": 8,
330 | "w": 12,
331 | "x": 0,
332 | "y": 16
333 | },
334 | "id": 2,
335 | "options": {
336 | "cellHeight": "sm",
337 | "footer": {
338 | "countRows": false,
339 | "fields": "",
340 | "reducer": [
341 | "sum"
342 | ],
343 | "show": false
344 | },
345 | "showHeader": true,
346 | "sortBy": [
347 | {
348 | "desc": true,
349 | "displayName": "points_count"
350 | }
351 | ]
352 | },
353 | "pluginVersion": "11.5.0",
354 | "targets": [
355 | {
356 | "datasource": {
357 | "type": "grafana-postgresql-datasource",
358 | "uid": "normal-timescaledb"
359 | },
360 | "editorMode": "code",
361 | "format": "table",
362 | "rawQuery": true,
363 | "rawSql": "with equips_points_stat as (SELECT lm.id, lm.\"equipTypeId\", COUNT(lm.class) as \"count\"\n FROM layer_model lm\n join points p on lm.point_id = p.id\n where p.point_type = 'EQUIPMENT'\n GROUP BY lm.id, lm.\"equipTypeId\")\nSELECT et.name as \"Equipment Type\",\n e.equips_count as \"Count\",\n p.points_count as \"Points\",\n s.\"0 Classes\",\n s.\"1-5 Classes\",\n s.\"6+ Classes\"\nFROM equipment_type et\n JOIN (SELECT \"equipTypeId\", count(*) as points_count\n FROM layer_model lm\n join points p on lm.point_id = p.id\n where p.point_type = 'POINT'\n GROUP BY \"equipTypeId\") p ON et.id = p.\"equipTypeId\"\n JOIN (SELECT \"equipTypeId\", count(*) as equips_count\n FROM layer_model lm\n join points p on lm.point_id = p.id\n where p.point_type = 'EQUIPMENT'\n GROUP BY \"equipTypeId\") e ON et.id = e.\"equipTypeId\"\n JOIN (SELECT \"equipTypeId\",\n count(*) filter ( where count = 0 ) as \"0 Classes\",\n count(*) filter ( where count between 1 and 5) AS \"1-5 Classes\",\n count(*) filter ( where count >= 6 ) as \"6+ Classes\"\n FROM equips_points_stat\n GROUP BY \"equipTypeId\") as s ON et.id = s.\"equipTypeId\"\n",
364 | "refId": "A",
365 | "sql": {
366 | "columns": [
367 | {
368 | "parameters": [],
369 | "type": "function"
370 | }
371 | ],
372 | "groupBy": [
373 | {
374 | "property": {
375 | "type": "string"
376 | },
377 | "type": "groupBy"
378 | }
379 | ],
380 | "limit": 50
381 | }
382 | }
383 | ],
384 | "title": "Equipment Types",
385 | "transformations": [
386 | {
387 | "id": "configFromData",
388 | "options": {
389 | "configRefId": "A",
390 | "mappings": [
391 | {
392 | "fieldName": "Count",
393 | "handlerKey": "max",
394 | "reducerId": "max"
395 | }
396 | ]
397 | }
398 | }
399 | ],
400 | "type": "table"
401 | },
402 | {
403 | "datasource": {
404 | "type": "grafana-postgresql-datasource",
405 | "uid": "normal-timescaledb"
406 | },
407 | "fieldConfig": {
408 | "defaults": {
409 | "color": {
410 | "mode": "continuous-BlPu"
411 | },
412 | "custom": {
413 | "axisBorderShow": false,
414 | "axisCenteredZero": false,
415 | "axisColorMode": "text",
416 | "axisLabel": "",
417 | "axisPlacement": "auto",
418 | "fillOpacity": 69,
419 | "gradientMode": "none",
420 | "hideFrom": {
421 | "legend": false,
422 | "tooltip": false,
423 | "viz": false
424 | },
425 | "lineWidth": 1,
426 | "scaleDistribution": {
427 | "type": "linear"
428 | },
429 | "thresholdsStyle": {
430 | "mode": "off"
431 | }
432 | },
433 | "fieldMinMax": false,
434 | "mappings": [],
435 | "thresholds": {
436 | "mode": "absolute",
437 | "steps": [
438 | {
439 | "color": "green",
440 | "value": null
441 | }
442 | ]
443 | }
444 | },
445 | "overrides": []
446 | },
447 | "gridPos": {
448 | "h": 8,
449 | "w": 12,
450 | "x": 12,
451 | "y": 16
452 | },
453 | "id": 1,
454 | "options": {
455 | "barRadius": 0,
456 | "barWidth": 0.5,
457 | "colorByField": "points",
458 | "fullHighlight": false,
459 | "groupWidth": 0.7,
460 | "legend": {
461 | "calcs": [],
462 | "displayMode": "list",
463 | "placement": "bottom",
464 | "showLegend": false
465 | },
466 | "orientation": "horizontal",
467 | "showValue": "auto",
468 | "stacking": "none",
469 | "tooltip": {
470 | "hideZeros": false,
471 | "mode": "single",
472 | "sort": "none"
473 | },
474 | "xField": "class",
475 | "xTickLabelRotation": 0,
476 | "xTickLabelSpacing": 100
477 | },
478 | "pluginVersion": "11.5.0",
479 | "targets": [
480 | {
481 | "datasource": {
482 | "type": "grafana-postgresql-datasource",
483 | "uid": "normal-timescaledb"
484 | },
485 | "editorMode": "code",
486 | "format": "table",
487 | "rawQuery": true,
488 | "rawSql": "SELECT\n class,\n count(*) points\nFROM\n layer_model lm\n join points p on p.id= lm.point_id\nWHERE\n lm.class IS NOT null and lm.class <>'' and p.point_type = 'POINT'\nGROUP BY\n lm.class \nORDER BY\n points DESC",
489 | "refId": "A",
490 | "sql": {
491 | "columns": [
492 | {
493 | "parameters": [],
494 | "type": "function"
495 | }
496 | ],
497 | "groupBy": [
498 | {
499 | "property": {
500 | "type": "string"
501 | },
502 | "type": "groupBy"
503 | }
504 | ],
505 | "limit": 50
506 | }
507 | }
508 | ],
509 | "title": "Point Classes",
510 | "type": "barchart"
511 | }
512 | ],
513 | "preload": false,
514 | "refresh": "",
515 | "schemaVersion": 40,
516 | "tags": [],
517 | "templating": {
518 | "list": []
519 | },
520 | "time": {
521 | "from": "now-6h",
522 | "to": "now"
523 | },
524 | "timepicker": {},
525 | "timezone": "",
526 | "title": "Home",
527 | "uid": "d719e98b-b843-4cb0-98f1-43967a0fbbea",
528 | "version": 1,
529 | "weekStart": ""
530 | }
--------------------------------------------------------------------------------
/solutions/idl/dashboards/normal/timeseries.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": {
7 | "type": "grafana",
8 | "uid": "-- Grafana --"
9 | },
10 | "enable": true,
11 | "hide": true,
12 | "iconColor": "rgba(0, 211, 255, 1)",
13 | "name": "Annotations & Alerts",
14 | "type": "dashboard"
15 | }
16 | ]
17 | },
18 | "editable": true,
19 | "fiscalYearStartMonth": 0,
20 | "graphTooltip": 0,
21 | "id": 4,
22 | "links": [],
23 | "panels": [
24 | {
25 | "datasource": {
26 | "type": "grafana-postgresql-datasource",
27 | "uid": "normal-timescaledb"
28 | },
29 | "fieldConfig": {
30 | "defaults": {
31 | "color": {
32 | "mode": "palette-classic",
33 | "seriesBy": "last"
34 | },
35 | "custom": {
36 | "axisBorderShow": false,
37 | "axisCenteredZero": false,
38 | "axisColorMode": "text",
39 | "axisLabel": "",
40 | "axisPlacement": "auto",
41 | "barAlignment": 0,
42 | "barWidthFactor": 0.6,
43 | "drawStyle": "line",
44 | "fillOpacity": 0,
45 | "gradientMode": "none",
46 | "hideFrom": {
47 | "legend": false,
48 | "tooltip": false,
49 | "viz": false
50 | },
51 | "insertNulls": false,
52 | "lineInterpolation": "linear",
53 | "lineStyle": {
54 | "fill": "solid"
55 | },
56 | "lineWidth": 1,
57 | "pointSize": 5,
58 | "scaleDistribution": {
59 | "type": "linear"
60 | },
61 | "showPoints": "auto",
62 | "spanNulls": false,
63 | "stacking": {
64 | "group": "A",
65 | "mode": "none"
66 | },
67 | "thresholdsStyle": {
68 | "mode": "off"
69 | }
70 | },
71 | "fieldMinMax": false,
72 | "mappings": [],
73 | "thresholds": {
74 | "mode": "absolute",
75 | "steps": [
76 | {
77 | "color": "green",
78 | "value": null
79 | }
80 | ]
81 | }
82 | },
83 | "overrides": []
84 | },
85 | "gridPos": {
86 | "h": 10,
87 | "w": 24,
88 | "x": 0,
89 | "y": 0
90 | },
91 | "id": 1,
92 | "options": {
93 | "legend": {
94 | "calcs": [],
95 | "displayMode": "list",
96 | "placement": "bottom",
97 | "showLegend": true
98 | },
99 | "tooltip": {
100 | "hideZeros": false,
101 | "mode": "multi",
102 | "sort": "none"
103 | }
104 | },
105 | "pluginVersion": "11.5.0",
106 | "targets": [
107 | {
108 | "datasource": {
109 | "type": "grafana-postgresql-datasource",
110 | "uid": "normal-timescaledb"
111 | },
112 | "editorMode": "code",
113 | "format": "table",
114 | "rawQuery": true,
115 | "rawSql": "SELECT lm.\"equipRef\",\n d.value as \"Value\",\n $__time(d.time)\nFROM\n data d\n join points p on d.point_id = p.id\n join layer_model lm on p.id = lm.point_id\nWHERE\n $__timeFilter(d.time)\n AND ('${class:csv}' = '''' OR lm.class IN ($class)) \nORDER BY\n time",
116 | "refId": "A",
117 | "sql": {
118 | "columns": [
119 | {
120 | "parameters": [],
121 | "type": "function"
122 | }
123 | ],
124 | "groupBy": [
125 | {
126 | "property": {
127 | "type": "string"
128 | },
129 | "type": "groupBy"
130 | }
131 | ],
132 | "limit": 50
133 | }
134 | }
135 | ],
136 | "title": "Equip",
137 | "transformations": [
138 | {
139 | "id": "prepareTimeSeries",
140 | "options": {
141 | "format": "multi"
142 | }
143 | }
144 | ],
145 | "type": "timeseries"
146 | },
147 | {
148 | "datasource": {
149 | "type": "grafana-postgresql-datasource",
150 | "uid": "normal-timescaledb"
151 | },
152 | "fieldConfig": {
153 | "defaults": {
154 | "color": {
155 | "mode": "palette-classic",
156 | "seriesBy": "last"
157 | },
158 | "custom": {
159 | "axisBorderShow": false,
160 | "axisCenteredZero": false,
161 | "axisColorMode": "text",
162 | "axisLabel": "",
163 | "axisPlacement": "auto",
164 | "barAlignment": 0,
165 | "barWidthFactor": 0.6,
166 | "drawStyle": "line",
167 | "fillOpacity": 0,
168 | "gradientMode": "none",
169 | "hideFrom": {
170 | "legend": false,
171 | "tooltip": false,
172 | "viz": false
173 | },
174 | "insertNulls": false,
175 | "lineInterpolation": "linear",
176 | "lineStyle": {
177 | "fill": "solid"
178 | },
179 | "lineWidth": 1,
180 | "pointSize": 5,
181 | "scaleDistribution": {
182 | "type": "linear"
183 | },
184 | "showPoints": "auto",
185 | "spanNulls": false,
186 | "stacking": {
187 | "group": "A",
188 | "mode": "none"
189 | },
190 | "thresholdsStyle": {
191 | "mode": "off"
192 | }
193 | },
194 | "fieldMinMax": false,
195 | "mappings": [],
196 | "thresholds": {
197 | "mode": "absolute",
198 | "steps": [
199 | {
200 | "color": "green",
201 | "value": null
202 | }
203 | ]
204 | }
205 | },
206 | "overrides": []
207 | },
208 | "gridPos": {
209 | "h": 10,
210 | "w": 24,
211 | "x": 0,
212 | "y": 10
213 | },
214 | "id": 2,
215 | "options": {
216 | "legend": {
217 | "calcs": [],
218 | "displayMode": "list",
219 | "placement": "bottom",
220 | "showLegend": true
221 | },
222 | "tooltip": {
223 | "hideZeros": false,
224 | "mode": "multi",
225 | "sort": "none"
226 | }
227 | },
228 | "pluginVersion": "11.5.0",
229 | "targets": [
230 | {
231 | "datasource": {
232 | "type": "grafana-postgresql-datasource",
233 | "uid": "normal-timescaledb"
234 | },
235 | "editorMode": "code",
236 | "format": "table",
237 | "rawQuery": true,
238 | "rawSql": "SELECT\n p.name,\n d.value as \"Value\",\n $__time(d.time)\nFROM\n data d\n join points p on d.point_id = p.id\n join layer_model lm on p.id = lm.point_id\nWHERE\n $__timeFilter(d.time)\n AND ('${class:csv}' = '''' OR lm.class IN ($class)) \nORDER BY\n time",
239 | "refId": "A",
240 | "sql": {
241 | "columns": [
242 | {
243 | "parameters": [],
244 | "type": "function"
245 | }
246 | ],
247 | "groupBy": [
248 | {
249 | "property": {
250 | "type": "string"
251 | },
252 | "type": "groupBy"
253 | }
254 | ],
255 | "limit": 50
256 | }
257 | }
258 | ],
259 | "title": "Object Name",
260 | "transformations": [
261 | {
262 | "id": "prepareTimeSeries",
263 | "options": {
264 | "format": "multi"
265 | }
266 | }
267 | ],
268 | "type": "timeseries"
269 | }
270 | ],
271 | "preload": false,
272 | "refresh": "",
273 | "schemaVersion": 40,
274 | "tags": [],
275 | "templating": {
276 | "list": [
277 | {
278 | "allValue": "''",
279 | "current": {
280 | "text": [
281 | "discharge-air-flow-sensor"
282 | ],
283 | "value": [
284 | "discharge-air-flow-sensor"
285 | ]
286 | },
287 | "datasource": {
288 | "type": "grafana-postgresql-datasource",
289 | "uid": "normal-timescaledb"
290 | },
291 | "definition": "SELECT DISTINCT class\nFROM layer_model lm\n join points p on lm.point_id = p.id\nWHERE p.point_type = 'POINT' and class IS NOT null and class <> ''",
292 | "includeAll": true,
293 | "label": "Class",
294 | "multi": true,
295 | "name": "class",
296 | "options": [],
297 | "query": "SELECT DISTINCT class\nFROM layer_model lm\n join points p on lm.point_id = p.id\nWHERE p.point_type = 'POINT' and class IS NOT null and class <> ''",
298 | "refresh": 1,
299 | "regex": "",
300 | "type": "query"
301 | }
302 | ]
303 | },
304 | "time": {
305 | "from": "now-6h",
306 | "to": "now"
307 | },
308 | "timepicker": {},
309 | "timezone": "",
310 | "title": "TimeSeries",
311 | "uid": "b3474cbc-2aa8-49de-8fa4-68e487bcc026",
312 | "version": 4,
313 | "weekStart": ""
314 | }
--------------------------------------------------------------------------------
/solutions/idl/docker-compose.yml:
--------------------------------------------------------------------------------
1 |
2 | version: "2.2"
3 | services:
4 |
5 | # the main container which runs all the NF services, load balancer,
6 | # and management console.
7 | nf:
8 | # image: normal.azurecr.io/normalframework/nf-full:3.4
9 | image: normal.azurecr.io/normalframework/nf-full:3.8
10 | network_mode: host
11 | depends_on:
12 | - redis
13 | tmpfs:
14 | - /run:exec
15 | - /etc/nginx/conf.d/
16 | - /tmp
17 | environment:
18 | - PGDATABASE=postgres
19 | - PGUSER=postgres
20 | - PGPASSWORD=password
21 | - PGHOST=localhost
22 |       # set if running behind a reverse proxy
23 | # - APPLICATION_PATH=/http-8080/localhost
24 | # set to enable username/password on the admin console + API
25 | # - CONSOLE_USERNAME=admin
26 | # - CONSOLE_PASSWORD=pw
27 |
28 |     # if you want data to be persistent, you need to mount a volume
29 |     # at /var in the container
30 | # volumes:
31 | # - /tmp/nf:/var
32 |
33 |   # Most persistent data is in redis. This is an essentially
34 |   # unmodified upstream image with RediSearch and RedisTimeSeries installed.
35 | redis:
36 | ports:
37 | - "6379:6379"
38 | image: normal.azurecr.io/normalframework/redis:3.8
39 | # image: normal.azurecr.io/normalframework/redis:3.4
40 | # volumes:
41 | # - /tmp/nf-redis:/data
42 |
43 | timescaledb:
44 | ports:
45 | - "5432:5432"
46 | image: timescale/timescaledb:latest-pg16
47 | environment:
48 | - POSTGRES_PASSWORD=password
49 | # change this to set a persistent data location
50 | # volumes:
51 | # - /tmp/nf-timescale:/home/postgres/pgdata/data
52 | networks:
53 | - idlnet
54 |
55 | grafana:
56 | image: grafana/grafana-enterprise
57 | volumes:
58 | - ./provisioning:/etc/grafana/provisioning
59 | - ./dashboards:/etc/dashboards
60 | - ./grafana.ini:/etc/grafana/grafana.ini
61 | - ./assets/logo.svg:/usr/share/grafana/public/img/grafana_icon.svg
62 | - ./assets/favicon.ico:/usr/share/grafana/public/img/fav32.png
63 | - ./assets/favicon.ico:/usr/share/grafana/public/img/apple-touch-icon.png
64 | ports:
65 | - "3000:3000"
66 | networks:
67 | - idlnet
68 |
69 |
70 | networks:
71 | idlnet:
72 | name: idlnet
73 |
74 |
--------------------------------------------------------------------------------
/solutions/idl/mosquitto.conf:
--------------------------------------------------------------------------------
1 | # Config file for mosquitto
2 | #
3 | # See mosquitto.conf(5) for more information.
4 | #
5 | # Default values are shown, uncomment to change.
6 | #
7 | # Use the # character to indicate a comment, but only if it is the
8 | # very first character on the line.
9 |
10 | # =================================================================
11 | # General configuration
12 | # =================================================================
13 |
14 | # Use per listener security settings.
15 | #
16 | # It is recommended this option be set before any other options.
17 | #
18 | # If this option is set to true, then all authentication and access control
19 | # options are controlled on a per listener basis. The following options are
20 | # affected:
21 | #
22 | # acl_file
23 | #  allow_anonymous
24 | # allow_zero_length_clientid
25 | # auto_id_prefix
26 | # password_file
27 | # plugin
28 | # plugin_opt_*
29 | # psk_file
30 | #
31 | # Note that if set to true, then a durable client (i.e. with clean session set
32 | # to false) that has disconnected will use the ACL settings defined for the
33 | # listener that it was most recently connected to.
34 | #
35 | # The default behaviour is for this to be set to false, which maintains the
36 | # setting behaviour from previous versions of mosquitto.
37 | #per_listener_settings false
38 |
39 |
40 | # This option controls whether a client is allowed to connect with a zero
41 | # length client id or not. This option only affects clients using MQTT v3.1.1
42 | # and later. If set to false, clients connecting with a zero length client id
43 | # are disconnected. If set to true, clients will be allocated a client id by
44 | # the broker. This means it is only useful for clients with clean session set
45 | # to true.
46 | #allow_zero_length_clientid true
47 |
48 | # If allow_zero_length_clientid is true, this option allows you to set a prefix
49 | # to automatically generated client ids to aid visibility in logs.
50 | # Defaults to 'auto-'
51 | #auto_id_prefix auto-
52 |
53 | # This option affects the scenario when a client subscribes to a topic that has
54 | # retained messages. It is possible that the client that published the retained
55 | # message to the topic had access at the time they published, but that access
56 | # has been subsequently removed. If check_retain_source is set to true, the
57 | # default, the source of a retained message will be checked for access rights
58 | # before it is republished. When set to false, no check will be made and the
59 | # retained message will always be published. This affects all listeners.
60 | #check_retain_source true
61 |
62 | # QoS 1 and 2 messages will be allowed inflight per client until this limit
63 | # is exceeded. Defaults to 0. (No maximum)
64 | # See also max_inflight_messages
65 | #max_inflight_bytes 0
66 |
67 | # The maximum number of QoS 1 and 2 messages currently inflight per
68 | # client.
69 | # This includes messages that are partway through handshakes and
70 | # those that are being retried. Defaults to 20. Set to 0 for no
71 | # maximum. Setting to 1 will guarantee in-order delivery of QoS 1
72 | # and 2 messages.
73 | #max_inflight_messages 20
74 |
75 | # For MQTT v5 clients, it is possible to have the server send a "server
76 | # keepalive" value that will override the keepalive value set by the client.
77 | # This is intended to be used as a mechanism to say that the server will
78 | # disconnect the client earlier than it anticipated, and that the client should
79 | # use the new keepalive value. The max_keepalive option allows you to specify
80 | # that clients may only connect with keepalive less than or equal to this
81 | # value, otherwise they will be sent a server keepalive telling them to use
82 | # max_keepalive. This only applies to MQTT v5 clients. The default, and maximum
83 | # value allowable, is 65535.
84 | #
85 | # Set to 0 to allow clients to set keepalive = 0, which means no keepalive
86 | # checks are made and the client will never be disconnected by the broker if no
87 | # messages are received. You should be very sure this is the behaviour that you
88 | # want.
89 | #
90 | # For MQTT v3.1.1 and v3.1 clients, there is no mechanism to tell the client
91 | # what keepalive value they should use. If an MQTT v3.1.1 or v3.1 client
92 | # specifies a keepalive time greater than max_keepalive they will be sent a
93 | # CONNACK message with the "identifier rejected" reason code, and disconnected.
94 | #
95 | #max_keepalive 65535
96 |
97 | # For MQTT v5 clients, it is possible to have the server send a "maximum packet
98 | # size" value that will instruct the client it will not accept MQTT packets
99 | # with size greater than max_packet_size bytes. This applies to the full MQTT
100 | # packet, not just the payload. Setting this option to a positive value will
101 | # set the maximum packet size to that number of bytes. If a client sends a
102 | # packet which is larger than this value, it will be disconnected. This applies
103 | # to all clients regardless of the protocol version they are using, but v3.1.1
104 | # and earlier clients will of course not have received the maximum packet size
105 | # information. Defaults to no limit. Setting below 20 bytes is forbidden
106 | # because it is likely to interfere with ordinary client operation, even with
107 | # very small payloads.
108 | #max_packet_size 0
109 |
110 | # QoS 1 and 2 messages above those currently in-flight will be queued per
111 | # client until this limit is exceeded. Defaults to 0. (No maximum)
112 | # See also max_queued_messages.
113 | # If both max_queued_messages and max_queued_bytes are specified, packets will
114 | # be queued until the first limit is reached.
115 | #max_queued_bytes 0
116 |
117 | # Set the maximum QoS supported. Clients publishing at a QoS higher than
118 | # specified here will be disconnected.
119 | #max_qos 2
120 |
121 | # The maximum number of QoS 1 and 2 messages to hold in a queue per client
122 | # above those that are currently in-flight. Defaults to 1000. Set
123 | # to 0 for no maximum (not recommended).
124 | # See also queue_qos0_messages.
125 | # See also max_queued_bytes.
126 | #max_queued_messages 1000
127 | #
128 | # This option sets the maximum number of heap memory bytes that the broker will
129 | # allocate, and hence sets a hard limit on memory use by the broker. Memory
130 | # requests that exceed this value will be denied. The effect will vary
131 | # depending on what has been denied. If an incoming message is being processed,
132 | # then the message will be dropped and the publishing client will be
133 | # disconnected. If an outgoing message is being sent, then the individual
134 | # message will be dropped and the receiving client will be disconnected.
135 | # Defaults to no limit.
136 | #memory_limit 0
137 |
138 | # This option sets the maximum publish payload size that the broker will allow.
139 | # Received messages that exceed this size will not be accepted by the broker.
140 | # The default value is 0, which means that all valid MQTT messages are
141 | # accepted. MQTT imposes a maximum payload size of 268435455 bytes.
142 | #message_size_limit 0
143 |
144 | # This option allows the session of persistent clients (those with clean
145 | # session set to false) that are not currently connected to be removed if they
146 | # do not reconnect within a certain time frame. This is a non-standard option
147 | # in MQTT v3.1. MQTT v3.1.1 and v5.0 allow brokers to remove client sessions.
148 | #
149 | # Badly designed clients may set clean session to false whilst using a randomly
150 | # generated client id. This leads to persistent clients that connect once and
151 | # never reconnect. This option allows such persistent clients (those with
152 | # clean session set to false) to be removed if they do not reconnect
153 | # within a certain time frame.
154 | #
155 | # The expiration period should be an integer followed by one of h d w m y for
156 | # hour, day, week, month and year respectively. For example
157 | #
158 | # persistent_client_expiration 2m
159 | # persistent_client_expiration 14d
160 | # persistent_client_expiration 1y
161 | #
162 | # The default if not set is to never expire persistent clients.
163 | #persistent_client_expiration
164 |
165 | # Write process id to a file. Default is a blank string which means
166 | # a pid file shouldn't be written.
167 | # This should be set to /var/run/mosquitto/mosquitto.pid if mosquitto is
168 | # being run automatically on boot with an init script and
169 | # start-stop-daemon or similar.
170 | #pid_file
171 |
172 | # Set to true to queue messages with QoS 0 when a persistent client is
173 | # disconnected. These messages are included in the limit imposed by
174 | # max_queued_messages and max_queued_bytes
175 | # Defaults to false.
176 | # This is a non-standard option for the MQTT v3.1 spec but is allowed in
177 | # v3.1.1.
178 | #queue_qos0_messages false
179 |
180 | # Set to false to disable retained message support. If a client publishes a
181 | # message with the retain bit set, it will be disconnected if this is set to
182 | # false.
183 | #retain_available true
184 |
185 | # Disable Nagle's algorithm on client sockets. This has the effect of reducing
186 | # latency of individual messages at the potential cost of increasing the number
187 | # of packets being sent.
188 | #set_tcp_nodelay false
189 |
190 | # Time in seconds between updates of the $SYS tree.
191 | # Set to 0 to disable the publishing of the $SYS tree.
192 | #sys_interval 10
193 |
194 | # The MQTT specification requires that the QoS of a message delivered to a
195 | # subscriber is never upgraded to match the QoS of the subscription. Enabling
196 | # this option changes this behaviour. If upgrade_outgoing_qos is set true,
197 | # messages sent to a subscriber will always match the QoS of its subscription.
198 | # This is a non-standard option explicitly disallowed by the spec.
199 | #upgrade_outgoing_qos false
200 |
201 | # When run as root, drop privileges to this user and its primary
202 | # group.
203 | # Set to root to stay as root, but this is not recommended.
204 | # If set to "mosquitto", or left unset, and the "mosquitto" user does not exist
205 | # then it will drop privileges to the "nobody" user instead.
206 | # If run as a non-root user, this setting has no effect.
207 | # Note that on Windows this has no effect and so mosquitto should be started by
208 | # the user you wish it to run as.
209 | #user mosquitto
210 |
211 | # =================================================================
212 | # Listeners
213 | # =================================================================
214 |
215 | # Listen on a port/ip address combination. By using this variable
216 | # multiple times, mosquitto can listen on more than one port. If
217 | # this variable is used and neither bind_address nor port given,
218 | # then the default listener will not be started.
219 | # The port number to listen on must be given. Optionally, an ip
220 | # address or host name may be supplied as a second argument. In
221 | # this case, mosquitto will attempt to bind the listener to that
222 | # address and so restrict access to the associated network and
223 | # interface. By default, mosquitto will listen on all interfaces.
224 | # Note that for a websockets listener it is not possible to bind to a host
225 | # name.
226 | #
227 | # On systems that support Unix Domain Sockets, it is also possible
228 | # to create a Unix socket rather than opening a TCP socket. In
229 | # this case, the port number should be set to 0 and a unix socket
230 | # path must be provided, e.g.
231 | # listener 0 /tmp/mosquitto.sock
232 | #
233 | # listener port-number [ip address/host name/unix socket path]
234 | listener 1883 0.0.0.0
235 |
236 | # By default, a listener will attempt to listen on all supported IP protocol
237 | # versions. If you do not have an IPv4 or IPv6 interface you may wish to
238 | # disable support for either of those protocol versions. In particular, note
239 | # that due to the limitations of the websockets library, it will only ever
240 | # attempt to open IPv6 sockets if IPv6 support is compiled in, and so will fail
241 | # if IPv6 is not available.
242 | #
243 | # Set to `ipv4` to force the listener to only use IPv4, or set to `ipv6` to
244 | # force the listener to only use IPv6. If you want support for both IPv4 and
245 | # IPv6, then do not use the socket_domain option.
246 | #
247 | #socket_domain
248 |
249 | # Bind the listener to a specific interface. This is similar to
250 | # the [ip address/host name] part of the listener definition, but is useful
251 | # when an interface has multiple addresses or the address may change. If used
252 | # with the [ip address/host name] part of the listener definition, then the
253 | # bind_interface option will take priority.
254 | # Not available on Windows.
255 | #
256 | # Example: bind_interface eth0
257 | #bind_interface
258 |
259 | # When a listener is using the websockets protocol, it is possible to serve
260 | # http data as well. Set http_dir to a directory which contains the files you
261 | # wish to serve. If this option is not specified, then no normal http
262 | # connections will be possible.
263 | #http_dir
264 |
265 | # The maximum number of client connections to allow. This is
266 | # a per listener setting.
267 | # Default is -1, which means unlimited connections.
268 | # Note that other process limits mean that unlimited connections
269 | # are not really possible. Typically the default maximum number of
270 | # connections possible is around 1024.
271 | #max_connections -1
272 |
273 | # The listener can be restricted to operating within a topic hierarchy using
274 | # the mount_point option. This is achieved by prefixing the mount_point string
275 | # to all topics for any clients connected to this listener. This prefixing only
276 | # happens internally to the broker; the client will not see the prefix.
277 | #mount_point
278 |
279 | # Choose the protocol to use when listening.
280 | # This can be either mqtt or websockets.
281 | # Certificate based TLS may be used with websockets, except that only the
282 | # cafile, certfile, keyfile, ciphers, and ciphers_tls13 options are supported.
283 | #protocol mqtt
284 |
285 | # Set use_username_as_clientid to true to replace the clientid that a client
286 | # connected with with its username. This allows authentication to be tied to
287 | # the clientid, which means that it is possible to prevent one client
288 | # disconnecting another by using the same clientid.
289 | # If a client connects with no username it will be disconnected as not
290 | # authorised when this option is set to true.
291 | # Do not use in conjunction with clientid_prefixes.
292 | # See also use_identity_as_username.
293 | # This does not apply globally, but on a per-listener basis.
294 | #use_username_as_clientid
295 |
296 | # Change the websockets headers size. This is a global option, it is not
297 | # possible to set per listener. This option sets the size of the buffer used in
298 | # the libwebsockets library when reading HTTP headers. If you are passing large
299 | # header data such as cookies then you may need to increase this value. If left
300 | # unset, or set to 0, then the default of 1024 bytes will be used.
301 | #websockets_headers_size
302 |
303 | # -----------------------------------------------------------------
304 | # Certificate based SSL/TLS support
305 | # -----------------------------------------------------------------
306 | # The following options can be used to enable certificate based SSL/TLS support
307 | # for this listener. Note that the recommended port for MQTT over TLS is 8883,
308 | # but this must be set manually.
309 | #
310 | # See also the mosquitto-tls man page and the "Pre-shared-key based SSL/TLS
311 | # support" section. Only one of certificate or PSK encryption support can be
312 | # enabled for any listener.
313 |
314 | # Both of certfile and keyfile must be defined to enable certificate based
315 | # TLS encryption.
316 |
317 | # Path to the PEM encoded server certificate.
318 | #certfile
319 |
320 | # Path to the PEM encoded keyfile.
321 | #keyfile
322 |
323 | # If you wish to control which encryption ciphers are used, use the ciphers
324 | # option. The list of available ciphers can be obtained using the "openssl
325 | # ciphers" command and should be provided in the same format as the output of
326 | # that command. This applies to TLS 1.2 and earlier versions only. Use
327 | # ciphers_tls1.3 for TLS v1.3.
328 | #ciphers
329 |
330 | # Choose which TLS v1.3 ciphersuites are used for this listener.
331 | # Defaults to "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256"
332 | #ciphers_tls1.3
333 |
334 | # If you have require_certificate set to true, you can create a certificate
335 | # revocation list file to revoke access to particular client certificates. If
336 | # you have done this, use crlfile to point to the PEM encoded revocation file.
337 | #crlfile
338 |
339 | # To allow the use of ephemeral DH key exchange, which provides forward
340 | # security, the listener must load DH parameters. This can be specified with
341 | # the dhparamfile option. The dhparamfile can be generated with the command
342 | # e.g. "openssl dhparam -out dhparam.pem 2048"
343 | #dhparamfile
344 |
345 | # By default an TLS enabled listener will operate in a similar fashion to a
346 | # https enabled web server, in that the server has a certificate signed by a CA
347 | # and the client will verify that it is a trusted certificate. The overall aim
348 | # is encryption of the network traffic. By setting require_certificate to true,
349 | # the client must provide a valid certificate in order for the network
350 | # connection to proceed. This allows access to the broker to be controlled
351 | # outside of the mechanisms provided by MQTT.
352 | #require_certificate false
353 |
354 | # cafile and capath define methods of accessing the PEM encoded
355 | # Certificate Authority certificates that will be considered trusted when
356 | # checking incoming client certificates.
357 | # cafile defines the path to a file containing the CA certificates.
358 | # capath defines a directory that will be searched for files
359 | # containing the CA certificates. For capath to work correctly, the
360 | # certificate files must have ".crt" as the file ending and you must run
361 | # "openssl rehash " each time you add/remove a certificate.
362 | #cafile
363 | #capath
364 |
365 |
366 | # If require_certificate is true, you may set use_identity_as_username to true
367 | # to use the CN value from the client certificate as a username. If this is
368 | # true, the password_file option will not be used for this listener.
369 | #use_identity_as_username false
370 |
371 | # -----------------------------------------------------------------
372 | # Pre-shared-key based SSL/TLS support
373 | # -----------------------------------------------------------------
374 | # The following options can be used to enable PSK based SSL/TLS support for
375 | # this listener. Note that the recommended port for MQTT over TLS is 8883, but
376 | # this must be set manually.
377 | #
378 | # See also the mosquitto-tls man page and the "Certificate based SSL/TLS
379 | # support" section. Only one of certificate or PSK encryption support can be
380 | # enabled for any listener.
381 |
382 | # The psk_hint option enables pre-shared-key support for this listener and also
383 | # acts as an identifier for this listener. The hint is sent to clients and may
384 | # be used locally to aid authentication. The hint is a free form string that
385 | # doesn't have much meaning in itself, so feel free to be creative.
386 | # If this option is provided, see psk_file to define the pre-shared keys to be
387 | # used or create a security plugin to handle them.
388 | #psk_hint
389 |
390 | # When using PSK, the encryption ciphers used will be chosen from the list of
391 | # available PSK ciphers. If you want to control which ciphers are available,
392 | # use the "ciphers" option. The list of available ciphers can be obtained
393 | # using the "openssl ciphers" command and should be provided in the same format
394 | # as the output of that command.
395 | #ciphers
396 |
397 | # Set use_identity_as_username to have the psk identity sent by the client used
398 | # as its username. Authentication will be carried out using the PSK rather than
399 | # the MQTT username/password and so password_file will not be used for this
400 | # listener.
401 | #use_identity_as_username false
402 |
403 |
404 | # =================================================================
405 | # Persistence
406 | # =================================================================
407 |
408 | # If persistence is enabled, save the in-memory database to disk
409 | # every autosave_interval seconds. If set to 0, the persistence
410 | # database will only be written when mosquitto exits. See also
411 | # autosave_on_changes.
412 | # Note that writing of the persistence database can be forced by
413 | # sending mosquitto a SIGUSR1 signal.
414 | #autosave_interval 1800
415 |
416 | # If true, mosquitto will count the number of subscription changes, retained
417 | # messages received and queued messages and if the total exceeds
418 | # autosave_interval then the in-memory database will be saved to disk.
419 | # If false, mosquitto will save the in-memory database to disk by treating
420 | # autosave_interval as a time in seconds.
421 | #autosave_on_changes false
422 |
423 | # Save persistent message data to disk (true/false).
424 | # This saves information about all messages, including
425 | # subscriptions, currently in-flight messages and retained
426 | # messages.
427 | # retained_persistence is a synonym for this option.
428 | #persistence false
429 |
430 | # The filename to use for the persistent database, not including
431 | # the path.
432 | #persistence_file mosquitto.db
433 |
434 | # Location for persistent database.
435 | # Default is an empty string (current directory).
436 | # Set to e.g. /var/lib/mosquitto if running as a proper service on Linux or
437 | # similar.
438 | #persistence_location
439 |
440 |
441 | # =================================================================
442 | # Logging
443 | # =================================================================
444 |
445 | # Places to log to. Use multiple log_dest lines for multiple
446 | # logging destinations.
447 | # Possible destinations are: stdout stderr syslog topic file dlt
448 | #
449 | # stdout and stderr log to the console on the named output.
450 | #
451 | # syslog uses the userspace syslog facility which usually ends up
452 | # in /var/log/messages or similar.
453 | #
454 | # topic logs to the broker topic '$SYS/broker/log/<severity>',
455 | # where severity is one of D, E, W, N, I, M which are debug, error,
456 | # warning, notice, information and message. Message type severity is used by
457 | # the subscribe/unsubscribe log_types and publishes log messages to
458 | # $SYS/broker/log/M/subscribe or $SYS/broker/log/M/unsubscribe.
459 | #
460 | # The file destination requires an additional parameter which is the file to be
461 | # logged to, e.g. "log_dest file /var/log/mosquitto.log". The file will be
462 | # closed and reopened when the broker receives a HUP signal. Only a single file
463 | # destination may be configured.
464 | #
465 | # The dlt destination is for the automotive `Diagnostic Log and Trace` tool.
466 | # This requires that Mosquitto has been compiled with DLT support.
467 | #
468 | # Note that if the broker is running as a Windows service it will default to
469 | # "log_dest none" and neither stdout nor stderr logging is available.
470 | # Use "log_dest none" if you wish to disable logging.
471 | #log_dest stderr
472 |
473 | # Types of messages to log. Use multiple log_type lines for logging
474 | # multiple types of messages.
475 | # Possible types are: debug, error, warning, notice, information,
476 | # none, subscribe, unsubscribe, websockets, all.
477 | # Note that debug type messages are for decoding the incoming/outgoing
478 | # network packets. They are not logged in "topics".
479 | #log_type error
480 | #log_type warning
481 | #log_type notice
482 | #log_type information
483 |
484 |
485 | # If set to true, client connection and disconnection messages will be included
486 | # in the log.
487 | #connection_messages true
488 |
489 | # If using syslog logging (not on Windows), messages will be logged to the
490 | # "daemon" facility by default. Use the log_facility option to choose which of
491 | # local0 to local7 to log to instead. The option value should be an integer
492 | # value, e.g. "log_facility 5" to use local5.
493 | #log_facility
494 |
495 | # If set to true, add a timestamp value to each log message.
496 | #log_timestamp true
497 |
498 | # Set the format of the log timestamp. If left unset, this is the number of
499 | # seconds since the Unix epoch.
500 | # This is a free text string which will be passed to the strftime function. To
501 | # get an ISO 8601 datetime, for example:
502 | # log_timestamp_format %Y-%m-%dT%H:%M:%S
503 | #log_timestamp_format
504 |
505 | # Change the websockets logging level. This is a global option, it is not
506 | # possible to set per listener. This is an integer that is interpreted by
507 | # libwebsockets as a bit mask for its lws_log_levels enum. See the
508 | # libwebsockets documentation for more details. "log_type websockets" must also
509 | # be enabled.
510 | #websockets_log_level 0
511 |
512 |
513 | # =================================================================
514 | # Security
515 | # =================================================================
516 |
517 | # If set, only clients that have a matching prefix on their
518 | # clientid will be allowed to connect to the broker. By default,
519 | # all clients may connect.
520 | # For example, setting "secure-" here would mean a client "secure-
521 | # client" could connect but another with clientid "mqtt" couldn't.
522 | #clientid_prefixes
523 |
524 | # Boolean value that determines whether clients that connect
525 | # without providing a username are allowed to connect. If set to
526 | # false then a password file should be created (see the
527 | # password_file option) to control authenticated client access.
528 | #
529 | # Defaults to false, unless there are no listeners defined in the configuration
530 | # file, in which case it is set to true, but connections are only allowed from
531 | # the local machine.
532 | allow_anonymous true
533 |
534 | # -----------------------------------------------------------------
535 | # Default authentication and topic access control
536 | # -----------------------------------------------------------------
537 |
538 | # Control access to the broker using a password file. This file can be
539 | # generated using the mosquitto_passwd utility. If TLS support is not compiled
540 | # into mosquitto (it is recommended that TLS support should be included) then
541 | # plain text passwords are used, in which case the file should be a text file
542 | # with lines in the format:
543 | # username:password
544 | # The password (and colon) may be omitted if desired, although this
545 | # offers very little in the way of security.
546 | #
547 | # See the TLS client require_certificate and use_identity_as_username options
548 | # for alternative authentication options. If a plugin is used as well as
549 | # password_file, the plugin check will be made first.
550 | #password_file
551 |
552 | # Access may also be controlled using a pre-shared-key file. This requires
553 | # TLS-PSK support and a listener configured to use it. The file should be text
554 | # lines in the format:
555 | # identity:key
556 | # The key should be in hexadecimal format without a leading "0x".
557 | # If a plugin is used as well, the plugin check will be made first.
558 | #psk_file
559 |
560 | # Control access to topics on the broker using an access control list
561 | # file. If this parameter is defined then only the topics listed will
562 | # have access.
563 | # If the first character of a line of the ACL file is a # it is treated as a
564 | # comment.
565 | # Topic access is added with lines of the format:
566 | #
567 | # topic [read|write|readwrite|deny] <topic>
568 | #
569 | # The access type is controlled using "read", "write", "readwrite" or "deny".
570 | # The access type is controlled using "read", "write", "readwrite" or "deny".
571 | # This parameter is optional (unless <topic> contains a space character) - if
572 | # not given then the access is read/write. <topic> can contain the + or #
573 | # wildcards as in subscriptions.
574 | # The "deny" option can used to explicity deny access to a topic that would
575 | # otherwise be granted by a broader read/write/readwrite statement. Any "deny"
576 | # topics are handled before topics that grant read/write access.
577 | #
578 | # The first set of topics are applied to anonymous clients, assuming
579 | # allow_anonymous is true. User specific topic ACLs are added after a
580 | # user line as follows:
581 | #
582 | # user <username>
583 | #
584 | # The username referred to here is the same as in password_file. It is
585 | # not the clientid.
586 | #
587 | #
588 | # It is also possible to define ACLs based on pattern substitution within the
589 | # topic. The patterns available for substitution are:
590 | #
591 | # %c to match the client id of the client
592 | # %u to match the username of the client
593 | #
594 | # The substitution pattern must be the only text for that level of hierarchy.
595 | #
596 | # The form is the same as for the topic keyword, but using pattern as the
597 | # keyword.
598 | # Pattern ACLs apply to all users even if the "user" keyword has previously
599 | # been given.
600 | #
601 | # If using bridges with usernames and ACLs, connection messages can be allowed
602 | # with the following pattern:
603 | # pattern write $SYS/broker/connection/%c/state
604 | #
605 | # pattern [read|write|readwrite] <topic>
606 | #
607 | # Example:
608 | #
609 | # pattern write sensor/%u/data
610 | #
611 | # If a plugin is used as well as acl_file, the plugin check will be
612 | # made first.
613 | #acl_file
614 |
615 | # -----------------------------------------------------------------
616 | # External authentication and topic access plugin options
617 | # -----------------------------------------------------------------
618 |
619 | # External authentication and access control can be supported with the
620 | # plugin option. This is a path to a loadable plugin. See also the
621 | # plugin_opt_* options described below.
622 | #
623 | # The plugin option can be specified multiple times to load multiple
624 | # plugins. The plugins will be processed in the order that they are specified
625 | # here. If the plugin option is specified alongside either of
626 | # password_file or acl_file then the plugin checks will be made first.
627 | #
628 | # If the per_listener_settings option is false, the plugin will apply to all
629 | # listeners. If per_listener_settings is true, then the plugin will apply to
630 | # the current listener being defined only.
631 | #
632 | # This option is also available as `auth_plugin`, but this use is deprecated
633 | # and will be removed in the future.
634 | #
635 | #plugin
636 |
637 | # If the plugin option above is used, define options to pass to the
638 | # plugin here as described by the plugin instructions. All options named
639 | # using the format plugin_opt_* will be passed to the plugin, for example:
640 | #
641 | # This option is also available as `auth_opt_*`, but this use is deprecated
642 | # and will be removed in the future.
643 | #
644 | # plugin_opt_db_host
645 | # plugin_opt_db_port
646 | # plugin_opt_db_username
647 | # plugin_opt_db_password
648 |
649 |
650 | # =================================================================
651 | # Bridges
652 | # =================================================================
653 |
654 | # A bridge is a way of connecting multiple MQTT brokers together.
655 | # Create a new bridge using the "connection" option as described below. Set
656 | # options for the bridges using the remaining parameters. You must specify the
657 | # address and at least one topic to subscribe to.
658 | #
659 | # Each connection must have a unique name.
660 | #
661 | # The address line may have multiple host address and ports specified. See
662 | # below in the round_robin description for more details on bridge behaviour if
663 | # multiple addresses are used. Note that if you use an IPv6 address, then you
664 | # are required to specify a port.
665 | #
666 | # The direction that the topic will be shared can be chosen by
667 | # specifying out, in or both, where the default value is out.
668 | # The QoS level of the bridged communication can be specified with the next
669 | # topic option. The default QoS level is 0, to change the QoS the topic
670 | # direction must also be given.
671 | #
672 | # The local and remote prefix options allow a topic to be remapped when it is
673 | # bridged to/from the remote broker. This provides the ability to place a topic
674 | # tree in an appropriate location.
675 | #
676 | # For more details see the mosquitto.conf man page.
677 | #
678 | # Multiple topics can be specified per connection, but be careful
679 | # not to create any loops.
680 | #
681 | # If you are using bridges with cleansession set to false (the default), then
682 | # you may get unexpected behaviour from incoming topics if you change what
683 | # topics you are subscribing to. This is because the remote broker keeps the
684 | # subscription for the old topic. If you have this problem, connect your bridge
685 | # with cleansession set to true, then reconnect with cleansession set to false
686 | # as normal.
687 | #connection <name>
688 | #address <host>[:<port>] [<host>[:<port>]]
689 | #topic <topic> [[[out | in | both] qos-level] local-prefix remote-prefix]
690 |
691 | # If you need to have the bridge connect over a particular network interface,
692 | # use bridge_bind_address to tell the bridge which local IP address the socket
693 | # should bind to, e.g. `bridge_bind_address 192.168.1.10`
694 | #bridge_bind_address
695 |
696 | # If a bridge has topics that have "out" direction, the default behaviour is to
697 | # send an unsubscribe request to the remote broker on that topic. This means
698 | # that changing a topic direction from "in" to "out" will not keep receiving
699 | # incoming messages. Sending these unsubscribe requests is not always
700 | # desirable, setting bridge_attempt_unsubscribe to false will disable sending
701 | # the unsubscribe request.
702 | #bridge_attempt_unsubscribe true
703 |
704 | # Set the version of the MQTT protocol to use for this bridge. Can be one
705 | # of mqttv50, mqttv311 or mqttv31. Defaults to mqttv311.
706 | #bridge_protocol_version mqttv311
707 |
708 | # Set the clean session variable for this bridge.
709 | # When set to true, when the bridge disconnects for any reason, all
710 | # messages and subscriptions will be cleaned up on the remote
711 | # broker. Note that with cleansession set to true, there may be a
712 | # significant amount of retained messages sent when the bridge
713 | # reconnects after losing its connection.
714 | # When set to false, the subscriptions and messages are kept on the
715 | # remote broker, and delivered when the bridge reconnects.
716 | #cleansession false
717 |
718 | # Set the amount of time a bridge using the lazy start type must be idle before
719 | # it will be stopped. Defaults to 60 seconds.
720 | #idle_timeout 60
721 |
722 | # Set the keepalive interval for this bridge connection, in
723 | # seconds.
724 | #keepalive_interval 60
725 |
726 | # Set the clientid to use on the local broker. If not defined, this defaults to
727 | # 'local.<clientid>'. If you are bridging a broker to itself, it is important
728 | # that local_clientid and clientid do not match.
729 | #local_clientid
730 |
731 | # If set to true, publish notification messages to the local and remote brokers
732 | # giving information about the state of the bridge connection. Retained
733 | # messages are published to the topic $SYS/broker/connection/<name>/state
734 | # unless the notification_topic option is used.
735 | # If the message is 1 then the connection is active, or 0 if the connection has
736 | # failed.
737 | # This uses the last will and testament feature.
738 | #notifications true
739 |
740 | # Choose the topic on which notification messages for this bridge are
741 | # published. If not set, messages are published on the topic
742 | # $SYS/broker/connection/<name>/state
743 | #notification_topic
744 |
745 | # Set the client id to use on the remote end of this bridge connection. If not
746 | # defined, this defaults to 'name.hostname' where name is the connection name
747 | # and hostname is the hostname of this computer.
748 | # This replaces the old "clientid" option to avoid confusion. "clientid"
749 | # remains valid for the time being.
750 | #remote_clientid
751 |
752 | # Set the password to use when connecting to a broker that requires
753 | # authentication. This option is only used if remote_username is also set.
754 | # This replaces the old "password" option to avoid confusion. "password"
755 | # remains valid for the time being.
756 | #remote_password
757 |
758 | # Set the username to use when connecting to a broker that requires
759 | # authentication.
760 | # This replaces the old "username" option to avoid confusion. "username"
761 | # remains valid for the time being.
762 | #remote_username
763 |
764 | # Set the amount of time a bridge using the automatic start type will wait
765 | # until attempting to reconnect.
766 | # This option can be configured to use a constant delay time in seconds, or to
767 | # use a backoff mechanism based on "Decorrelated Jitter", which adds a degree
768 | # of randomness to when the restart occurs.
769 | #
770 | # Set a constant timeout of 20 seconds:
771 | # restart_timeout 20
772 | #
773 | # Set backoff with a base (start value) of 10 seconds and a cap (upper limit) of
774 | # 60 seconds:
775 | # restart_timeout 10 60
776 | #
777 | # Defaults to jitter with a base of 5 and cap of 30
778 | #restart_timeout 5 30
779 |
780 | # If the bridge has more than one address given in the address/addresses
781 | # configuration, the round_robin option defines the behaviour of the bridge on
782 | # a failure of the bridge connection. If round_robin is false, the default
783 | # value, then the first address is treated as the main bridge connection. If
784 | # the connection fails, the other secondary addresses will be attempted in
785 | # turn. Whilst connected to a secondary bridge, the bridge will periodically
786 | # attempt to reconnect to the main bridge until successful.
787 | # If round_robin is true, then all addresses are treated as equals. If a
788 | # connection fails, the next address will be tried and if successful will
789 | # remain connected until it fails.
790 | #round_robin false
791 |
792 | # Set the start type of the bridge. This controls how the bridge starts and
793 | # can be one of three types: automatic, lazy and once. Note that RSMB provides
794 | # a fourth start type "manual" which isn't currently supported by mosquitto.
795 | #
796 | # "automatic" is the default start type and means that the bridge connection
797 | # will be started automatically when the broker starts and also restarted
798 | # after a short delay (30 seconds) if the connection fails.
799 | #
800 | # Bridges using the "lazy" start type will be started automatically when the
801 | # number of queued messages exceeds the number set with the "threshold"
802 | # parameter. It will be stopped automatically after the time set by the
803 | # "idle_timeout" parameter. Use this start type if you wish the connection to
804 | # only be active when it is needed.
805 | #
806 | # A bridge using the "once" start type will be started automatically when the
807 | # broker starts but will not be restarted if the connection fails.
808 | #start_type automatic
809 |
810 | # Set the number of messages that need to be queued for a bridge with lazy
811 | # start type to be restarted. Defaults to 10 messages.
812 | # Must be less than max_queued_messages.
813 | #threshold 10
814 |
815 | # If try_private is set to true, the bridge will attempt to indicate to the
816 | # remote broker that it is a bridge not an ordinary client. If successful, this
817 | # means that loop detection will be more effective and that retained messages
818 | # will be propagated correctly. Not all brokers support this feature so it may
819 | # be necessary to set try_private to false if your bridge does not connect
820 | # properly.
821 | #try_private true
822 |
823 | # Some MQTT brokers do not allow retained messages. MQTT v5 gives a mechanism
824 | # for brokers to tell clients that they do not support retained messages, but
825 | # this is not possible for MQTT v3.1.1 or v3.1. If you need to bridge to a
826 | # v3.1.1 or v3.1 broker that does not support retained messages, set the
827 | # bridge_outgoing_retain option to false. This will remove the retain bit on
828 | # all outgoing messages to that bridge, regardless of any other setting.
829 | #bridge_outgoing_retain true
830 |
831 | # If you wish to restrict the size of messages sent to a remote bridge, use the
832 | # bridge_max_packet_size option. This sets the maximum number of bytes for
833 | # the total message, including headers and payload.
834 | # Note that MQTT v5 brokers may provide their own maximum-packet-size property.
835 | # In this case, the smaller of the two limits will be used.
836 | # Set to 0 for "unlimited".
837 | #bridge_max_packet_size 0
838 |
839 |
840 | # -----------------------------------------------------------------
841 | # Certificate based SSL/TLS support
842 | # -----------------------------------------------------------------
843 | # Either bridge_cafile or bridge_capath must be defined to enable TLS support
844 | # for this bridge.
845 | # bridge_cafile defines the path to a file containing the
846 | # Certificate Authority certificates that have signed the remote broker
847 | # certificate.
848 | # bridge_capath defines a directory that will be searched for files containing
849 | # the CA certificates. For bridge_capath to work correctly, the certificate
850 | # files must have ".crt" as the file ending and you must run "openssl rehash
851 | # " each time you add/remove a certificate.
852 | #bridge_cafile
853 | #bridge_capath
854 |
855 |
856 | # If the remote broker has more than one protocol available on its port, e.g.
857 | # MQTT and WebSockets, then use bridge_alpn to configure which protocol is
858 | # requested. Note that WebSockets support for bridges is not yet available.
859 | #bridge_alpn
860 |
861 | # When using certificate based encryption, bridge_insecure disables
862 | # verification of the server hostname in the server certificate. This can be
863 | # useful when testing initial server configurations, but makes it possible for
864 | # a malicious third party to impersonate your server through DNS spoofing, for
865 | # example. Use this option in testing only. If you need to resort to using this
866 | # option in a production environment, your setup is at fault and there is no
867 | # point using encryption.
868 | #bridge_insecure false
869 |
870 | # Path to the PEM encoded client certificate, if required by the remote broker.
871 | #bridge_certfile
872 |
873 | # Path to the PEM encoded client private key, if required by the remote broker.
874 | #bridge_keyfile
875 |
876 | # -----------------------------------------------------------------
877 | # PSK based SSL/TLS support
878 | # -----------------------------------------------------------------
879 | # Pre-shared-key encryption provides an alternative to certificate based
880 | # encryption. A bridge can be configured to use PSK with the bridge_identity
881 | # and bridge_psk options. These are the client PSK identity, and pre-shared-key
882 | # in hexadecimal format with no "0x". Only one of certificate and PSK based
883 | # encryption can be used on one
884 | # bridge at once.
885 | #bridge_identity
886 | #bridge_psk
887 |
888 |
889 | # =================================================================
890 | # External config files
891 | # =================================================================
892 |
893 | # External configuration files may be included by using the
894 | # include_dir option. This defines a directory that will be searched
895 | # for config files. All files that end in '.conf' will be loaded as
896 | # a configuration file. It is best to have this as the last option
897 | # in the main file. This option will only be processed from the main
898 | # configuration file. The directory specified must not contain the
899 | # main configuration file.
900 | # Files within include_dir will be loaded sorted in case-sensitive
901 | # alphabetical order, with capital letters ordered first. If this option is
902 | # given multiple times, all of the files from the first instance will be
903 | # processed before the next instance. See the man page for examples.
904 | #include_dir
905 |
--------------------------------------------------------------------------------
/solutions/idl/provisioning/dashboards/normal-dashboard.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | providers:
4 | - name: default
5 | type: file
6 | folder: 'normal'
7 | folderUid: 'normal'
8 | updateIntervalSeconds: 30
9 | options:
10 | path: /etc/dashboards
11 |
--------------------------------------------------------------------------------
/solutions/idl/provisioning/datasources/tailscale-datasource.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | datasources:
4 | - name: NormalTimescale
5 | type: postgres
6 | url: timescaledb:5432
7 | database: postgres
8 | user: postgres
9 | uid: normal-timescaledb
10 | secureJsonData:
11 | password: "password"
12 | jsonData:
13 | sslmode: "disable"
14 | maxOpenConns: 100
15 | maxIdleConns: 100
16 | connMaxLifetime: 14400
17 | postgresVersion: 1400
18 | maxIdleConnsAuto: true
19 | timescaledb: true
20 |
--------------------------------------------------------------------------------
/solutions/idl/readme.md:
--------------------------------------------------------------------------------
1 | # Normal Framework - Independent Data Layer
2 |
3 | This directory is an example of how to set up an Independent Data Layer (IDL) using Normal Framework. The entry point is a `docker-compose` file which defines the various required services and connections.
4 |
5 | ## Running this example
6 | 1. Copy the files in this directory
7 | 2. Run `docker-compose pull && docker-compose up -d`
8 |
9 | ## Overview
10 | In this example setup, the `Historian` service listens for MQTT Sparkplug messages and inserts them into the [Timescale](https://www.timescale.com/) database. [Mosquitto](https://mosquitto.org/) is the MQTT message broker. The IDL data ultimately resides in the Timescale database, which can be queried by dependent services.
11 |
12 | ## User Interface
13 | This example setup uses [Grafana](https://grafana.com/), with a few pre-defined dashboards for displaying the IDL data. Additional dashboards can be configured through the Grafana user interface, or by changing the default configuration in the `./dashboards/normal` directory.
14 |
15 | When running locally, you can visit Grafana at http://localhost:3000 using the username/password `admin:admin`, and Normal Framework at http://localhost:8080. You can also connect directly to TimescaleDB or Mosquitto to explore integrating other solutions; see the example query at the end of this readme.
16 |
17 | ## Moving to Production
18 | This solution template is intended to be an example of how Normal can be used to build an end-to-end IDL solution. Before deploying this in production, you should ensure you've considered:
19 |
20 | * Creating persistent volumes for Redis, Timescale, and Normal so data survive container restarts
21 | * Securing the connections between the various components by changing the password defaults and creating firewall rules.
22 | * Ensuring you have a backup strategy for your valuable data. For instance, you could move the TimescaleDB to a cloud service like [Timescale Cloud](https://www.timescale.com/cloud) which will ensure you have highly available storage for your data.
23 |
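24 | ## Example query
25 |
26 | As a sketch of how a dependent service might consume the IDL, the query below reads recent samples for a single point class directly from TimescaleDB. The table and column names (`data`, `points`, `layer_model`) are taken from the bundled Grafana dashboards; the class value is only an example and should be one that exists in your model.
27 |
28 | ```sql
29 | -- recent samples for one point class, newest first
30 | SELECT p.name, d.time, d.value
31 | FROM data d
32 | JOIN points p ON d.point_id = p.id
33 | JOIN layer_model lm ON lm.point_id = p.id
34 | WHERE lm.class = 'discharge-air-flow-sensor'
35 | ORDER BY d.time DESC
36 | LIMIT 100;
37 | ```
38 |
39 | With the defaults from the `docker-compose` file, you can run it via `PGPASSWORD=password psql -h localhost -U postgres`.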
--------------------------------------------------------------------------------
/sparkplug-historian/README:
--------------------------------------------------------------------------------
1 |
2 | The Sparkplug-SQL historian reads data from an MQTT/Sparkplug broker
3 | and inserts them into a Postgres/Timescale DB.
4 |
5 | Prerequisites:
6 |
7 | * MQTT broker is running
8 | * Postgres is running with a username for the historian
9 | * Tables have been created using tables.sql
10 |
11 | You may use the following environment variables to configure the MQTT
12 | and Postgres connections:
13 |
14 | The default topic is /spBv1.0/{ SPARKPLUG_GROUP_ID }/#
15 | SPARKPLUG_GROUP_ID=normalgw
16 |
17 | MQTT connection information. SSL is not currently supported for the historian.
18 | MQTT_BROKER=LOCALHOST
19 | MQTT_PORT=1883
20 | MQTT_CLIENT_ID=sparkplug_historian
21 | MQTT_USERNAME=
22 | MQTT_PASSWORD=
23 |
24 | Postgres connection information.
25 | PGHOST=localhost
26 | PGUSER=sparkplug
27 | PGPASSWORD=password
28 | PGPORT=5432
29 | PGDATABASE=sparkplug
30 | PGSCHEMA=public
31 |
32 |
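33 | Example: the following is a minimal local run using the variables
34 | documented above. The invocation of the historian binary is
35 | illustrative and may differ in your deployment.
36 |
37 |   psql -U sparkplug -d sparkplug -f tables.sql
38 |   export MQTT_BROKER=localhost MQTT_PORT=1883
39 |   export PGHOST=localhost PGUSER=sparkplug PGPASSWORD=password PGDATABASE=sparkplug
40 |   ./sparkplug-sql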
--------------------------------------------------------------------------------
/sparkplug-historian/sparkplug-sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/normalframework/nf-sdk/7aca48850a7ca03308dab275ab86a742836440bf/sparkplug-historian/sparkplug-sql
--------------------------------------------------------------------------------
/sparkplug-historian/tables.sql:
--------------------------------------------------------------------------------
1 |
2 | DROP TABLE IF EXISTS metadata CASCADE;
3 | DROP TABLE IF EXISTS metrics CASCADE;
4 |
5 |
6 | CREATE TABLE IF NOT EXISTS nodes (
7 | group_name text NOT NULL,
8 | node_name text NOT NULL,
9 | lgsn bigint,
10 | PRIMARY KEY (group_name, node_name)
11 | );
12 |
13 | CREATE TABLE IF NOT EXISTS metadata (
14 | id serial UNIQUE,
15 |    group_name text NOT NULL,
16 | node_name text NOT NULL,
17 | device_name text NOT NULL,
18 | metric_name text NOT NULL,
19 | metric_alias bigint,
20 | last_dbirth TIMESTAMPTZ,
21 | last_ddeath TIMESTAMPTZ,
22 | dbirth_seq INT,
23 | -- holds the PropertySet received in the last DBIRTH
24 | properties json DEFAULT '{}',
25 | PRIMARY KEY (group_name, node_name, device_name, metric_name)
26 | );
27 |
28 | CREATE INDEX IF NOT EXISTS metadata_index ON metadata (group_name, node_name, device_name, metric_alias);
29 |
30 | -- holds the raw metric data, converted to scalar
31 | CREATE TABLE IF NOT EXISTS metrics (
32 | metric_id INT REFERENCES metadata(id),
33 | time TIMESTAMPTZ,
34 | insert_time TIMESTAMPTZ DEFAULT NOW(),
35 |    value double precision -- don't support strings or other types for now
36 | );
37 |
38 | -- unique index prevents duplicate data from being inserted into the
39 | -- table
40 | CREATE UNIQUE INDEX IF NOT EXISTS metrics_index ON metrics (metric_id, time);
41 |
42 |
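43 | -- Illustrative only (not part of the schema above): the unique index on
44 | -- (metric_id, time) lets inserts be made idempotent, e.g.:
45 | --
46 | -- INSERT INTO metrics (metric_id, time, value)
47 | -- VALUES (1, NOW(), 42.0)
48 | -- ON CONFLICT (metric_id, time) DO NOTHING;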
--------------------------------------------------------------------------------
/sparkplug-historian/views.sql:
--------------------------------------------------------------------------------
1 | DROP VIEW IF EXISTS bacnet_metadata;
2 | CREATE VIEW bacnet_metadata AS (
3 | SELECT
4 | id metric_id,
5 | node_name,
6 | (properties->'device_id'->>'stringValue')::int device_id,
7 | (properties->'device_uuid'->>'stringValue') device_uuid,
8 | (properties->'scanned_at'->>'stringValue')::timestamp scanned_at,
9 | (properties->'prop_object_name'->>'stringValue') object_name,
10 | (properties->'prop_description'->>'stringValue') object_description,
11 | (properties->'type'->>'stringValue') object_type,
12 | (properties->'instance'->>'stringValue') object_instance,
13 | (properties->'device_prop_object_name'->>'stringValue') device_name,
14 | (properties->'device_prop_description'->>'stringValue') device_description,
15 | (properties->'device_prop_model_name'->>'stringValue') device_model_name,
16 | (properties->'device_prop_vendor_name'->>'stringValue') device_vendor_name,
17 | (properties->'device_prop_application_software_version'->>'stringValue') device_application_software_version,
18 | (properties->'device_prop_location'->>'stringValue') device_location,
19 | (properties->'prop_low_limit'->>'stringValue')::float low_limit,
20 | (properties->'prop_high_limit'->>'stringValue')::float high_limit,
21 | (properties->'prop_min_pres_value'->>'stringValue')::float min_pres_value,
22 | (properties->'prop_max_pres_value'->>'stringValue')::float max_pres_value,
23 | (properties->'class'->>'stringValue') className,
24 | (properties->'equipRef'->>'stringValue') equipRef,
25 | string_to_array(properties->'markers'->>'stringValue', ',') markers
26 | FROM
27 | metadata
28 | )
29 | ;
30 | DROP VIEW IF EXISTS public.data_replication_status;
31 | CREATE VIEW public.data_replication_status AS
32 | SELECT nodes.node_name,
33 | ('1970-01-01 00:00:00'::timestamp without time zone + ((((nodes.lgsn >> 10) / 1000))::double precision * '00:00:01'::interval)) AS lgsm_time
34 | FROM public.nodes;
35 |
36 | -- helper: collapse an array to its distinct elements
37 | CREATE FUNCTION array_distinct(anyarray) RETURNS anyarray AS $f$
38 | SELECT array_agg(DISTINCT x) FROM unnest($1) t(x);
39 | $f$ LANGUAGE SQL IMMUTABLE;
40 |
41 | -- report: distinct sites and object/device counts per vendor and model
42 | select
43 | device_vendor_name,
44 | device_model_name,
45 | array_distinct(array_agg(node_name)) as sites,
46 | count(*) as object_count,
47 | count(distinct (node_name, device_id)) as device_count
48 | from
49 | bacnet_metadata
50 | group by
51 | device_vendor_name,
52 | device_model_name;
53 |
54 | -- report: per-device object-type count vectors, grouped by vendor and model
55 | with object_counts as (
56 | SELECT
57 | device_vendor_name,
58 | device_model_name,
59 | node_name,
60 | device_id,
61 | object_type,
62 | count(*) as cnt,
63 | array_distinct(array_agg(node_name)) as sites
64 | FROM
65 | bacnet_metadata
66 | GROUP BY
67 | node_name,
68 | device_vendor_name,
69 | device_model_name,
70 | device_id,
71 | object_type),
72 | device_vectors as (
73 | SELECT
74 | device_vendor_name,
75 | device_model_name,
76 | node_name,
77 | device_id,
78 |     array_agg(cnt ORDER BY object_type) as object_counts,
79 |     array_agg(object_type ORDER BY object_type) as object_types
80 | FROM object_counts
81 | GROUP BY device_vendor_name,
82 | device_model_name,
83 | node_name, device_id)
84 |
85 | SELECT
86 |     device_vendor_name,
87 |     device_model_name,
88 |     array_distinct(array_agg(node_name)) as sites,
89 |     count(distinct (node_name, device_id)) as device_count,
90 |     object_counts
91 | FROM device_vectors
92 | GROUP BY
93 |     device_vendor_name,
94 |     device_model_name,
95 |     object_counts
96 | ;
97 |
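
The `bacnet_metadata` view unpacks the Sparkplug `PropertySet` JSON into ordinary columns, so ad-hoc queries never need to touch the `properties` blob directly. As a hedged illustration (the vendor string here is a hypothetical placeholder), the latest reading for each of one vendor's objects could be pulled with `DISTINCT ON`:

```sql
-- Hypothetical example: newest sample per object for one vendor.
SELECT DISTINCT ON (m.metric_id)
       b.device_name, b.object_name, m.time, m.value
FROM bacnet_metadata b
JOIN metrics m ON m.metric_id = b.metric_id
WHERE b.device_vendor_name = 'Acme Controls'
ORDER BY m.metric_id, m.time DESC;
```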
--------------------------------------------------------------------------------
/tools/modpoll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/normalframework/nf-sdk/7aca48850a7ca03308dab275ab86a742836440bf/tools/modpoll
--------------------------------------------------------------------------------
/tools/restore.py:
--------------------------------------------------------------------------------
1 | """Restor a backup"""
2 |
3 | import json
4 | import zipfile
5 | import os
6 | import sys
7 | sys.path.append(os.path.join(os.path.dirname(__file__), "..", "examples", "api"))  # make helpers.py importable
8 |
9 | from helpers import NfClient, print_response
10 |
11 | def load_points(client, ar, create_layers=False):
12 | if create_layers:
13 | lfp = ar.open("layers.jsonl")
14 |         for line in lfp.readlines():
15 |             rec = json.loads(line)
16 |             for layer in rec["layers"]:
17 |                 print("Creating layer", layer["name"])
18 |                 res = client.post("/api/v1/point/layers", json={
19 |                     "layer": layer})
20 |                 print_response(res)
21 | lfp.close()
22 |
23 | fp = ar.open("points.jsonl")
24 | points = []
25 | i = 0
26 |     for line in fp.readlines():
27 |         i += 1
28 |         rec = json.loads(line)
29 |         points.append(rec)
30 |         if len(points) == 500:  # flush full batches of 500 points
31 |             res = client.post("/api/v1/point/points", json={
32 |                 "points": points,
33 |                 "is_async": True,
34 |             })
35 |             print_response(res)
36 |             points = []
37 |     if len(points) > 0:  # flush the final partial batch
38 | res = client.post("/api/v1/point/points", json={
39 | "points": points,
40 | "is_async": True,
41 | })
42 | print_response(res)
43 |
44 | if __name__ == '__main__':
45 | client = NfClient()
46 |
47 | if len(sys.argv) < 2:
48 |         print("usage:\n\trestore.py <backup.zip>\n")
49 | sys.exit(1)
50 | ar = zipfile.ZipFile(sys.argv[1])
51 | print(ar.namelist())
52 | load_points(client, ar)
53 |
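
Run the script with the backup archive as its only argument (for example, `python restore.py backup.zip`). The archive is expected to contain `points.jsonl`, and optionally `layers.jsonl`; points are uploaded asynchronously in batches of 500 through `/api/v1/point/points`. Note that `load_points` is invoked with the default `create_layers=False`, so target layers must already exist unless you change that flag.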
--------------------------------------------------------------------------------
/view/listener.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "expose-nf",
3 | "type": "NODEPORT",
4 | "node_id": "beaa0d8a-78c4-405e-bd5a-4042bf1fdd9a",
5 | "service_selector": {
6 | "name": "normalframework"
7 | },
8 | "allow_access_from": [
9 | "71.112.164.133/32"
10 | ],
11 | "service_ports": [
12 | {
13 | "protocol": "TCP",
14 | "port": 8080,
15 | "node_port": 8080
16 | }
17 | ]
18 | }
19 |
--------------------------------------------------------------------------------
/view/podspec.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "normalframework",
3 | "kind": "SINGLETON",
4 | "image_pull_secrets": ["azureacr-login"],
5 | "node_id": "beaa0d8a-78c4-405e-bd5a-4042bf1fdd9a",
6 | "networks": [
7 | {
8 | "network_id": "n-09e49136e6c79a60",
9 | "is_default": true
10 | }
11 | ],
12 | "services" : [
13 | {
14 | "name": "redis",
15 | "image": {
16 | "name": "normalframework.azurecr.io/normalframework/redis",
17 | "version": "2.0"
18 | },
19 | "docker": {
20 | "volume_mounts": [{
21 | "name": "redis-state",
22 | "mount_path": "/data",
23 | "read_only": false
24 | }]
25 | }
26 | },
27 | {
28 | "name": "nf",
29 | "image": {
30 | "name": "normalframework.azurecr.io/normalframework/nf-full",
31 | "version": "2.0"
32 | },
33 | "docker": {
34 | "environment_vars": {
35 | "REDIS_HOST": "redis"
36 | },
37 | "volume_mounts": [{
38 | "name": "nf-state",
39 | "mount_path": "/var/nf",
40 | "read_only": false
41 | }]
42 | }
43 | }
44 | ],
45 | "volumes": [
46 | {
47 | "name": "redis-state",
48 | "empty_dir": {}
49 | },
50 | {
51 | "name": "nf-state",
52 | "empty_dir": {}
53 | }
54 | ]
55 | }
56 |
--------------------------------------------------------------------------------
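
Taken together, these two files deploy NF on a managed node: `podspec.json` runs a singleton pod with the `redis` and `nf-full` containers (wired together via the `REDIS_HOST` environment variable), and `listener.json` exposes the console's port 8080 as a node port restricted to a single source CIDR. If the `empty_dir` volumes follow Kubernetes `emptyDir` semantics, state will not survive pod re-creation, so persistent volumes would be preferable for production use.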