├── .gitignore ├── LICENSE ├── LOG └── 2F6913DB │ ├── 00000019 │ └── 00000001-61D9D80A.MF4 │ ├── 00000020 │ └── 00000001-61D9D830.MF4 │ └── 00000021 │ └── 00000001-61D9D9BA.MF4 ├── README.md ├── dashboard-templates ├── dashboard-template-sample-data.json └── dashboard-template-simple.json ├── dbc_files ├── canmod-gps.dbc └── nissan_uds.dbc ├── deploy-aws-lambda ├── README.md ├── README_manual_deployment.md ├── arn-layers │ ├── README.md │ ├── build_layers.bat │ ├── build_layers.py │ ├── lambda_layer_arns.csv │ └── requirements.txt ├── delete_aws_lambda.bat ├── delete_aws_lambda.py ├── deploy_aws_lambda.bat ├── deploy_aws_lambda.py ├── install.bat ├── lambda_function.py └── requirements.txt ├── inputs.py ├── install.bat ├── main.bat ├── main.py ├── requirements.txt ├── utils.py └── utils_db.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.whl 3 | python 4 | *.zip 5 | build.bat 6 | *inputs_test.py 7 | *j1939-engine.dbc 8 | *j1939-speed.dbc 9 | *test_new.py 10 | *env/* 11 | *_test* 12 | *lambda_inputs_test.json* 13 | *deploy-aws-lambda/env/* 14 | *ev6-gps.dbc* 15 | 16 | *canedge-influxdb-writer.zip* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Martin Falch 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /LOG/2F6913DB/00000019/00000001-61D9D80A.MF4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSS-Electronics/canedge-influxdb-writer/9a118c1199dc8313806a293edbfd82e945e58bff/LOG/2F6913DB/00000019/00000001-61D9D80A.MF4 -------------------------------------------------------------------------------- /LOG/2F6913DB/00000020/00000001-61D9D830.MF4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSS-Electronics/canedge-influxdb-writer/9a118c1199dc8313806a293edbfd82e945e58bff/LOG/2F6913DB/00000020/00000001-61D9D830.MF4 -------------------------------------------------------------------------------- /LOG/2F6913DB/00000021/00000001-61D9D9BA.MF4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CSS-Electronics/canedge-influxdb-writer/9a118c1199dc8313806a293edbfd82e945e58bff/LOG/2F6913DB/00000021/00000001-61D9D9BA.MF4 -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Grafana-InfluxDB - Push CAN Bus Data to InfluxDB [LEGACY] 2 | 3 | **This project is now LEGACY and not supported - we recommend using our [Grafana-Athena](https://www.csselectronics.com/pages/telematics-dashboard-open-source) integration.** 4 | 5 | 6 | This project lets you DBC decode CAN data from your [CANedge](https://www.csselectronics.com/pages/can-bus-hardware-products) CAN/LIN data logger - and push the data into an InfluxDB database. From here, the data can be visualized in your own customized, open source Grafana dashboards. 7 | 8 | For the full step-by-step guide to setting up your dashboard, see the [CANedge intro](https://canlogger.csselectronics.com/canedge-getting-started/ce2/log-file-tools/browser-dashboards). 9 | 10 | ![CAN Bus Grafana InfluxDB Dashboard](https://canlogger1000.csselectronics.com/img/CAN-Bus-Telematics-Dashboard-InfluxDB-Grafana.png) 11 | 12 | ## Backend vs. Writer 13 | We provide two options for integrating your CANedge data with Grafana dashboards: 14 | 15 | The [CANedge Grafana Backend](https://github.com/CSS-Electronics/canedge-grafana-backend) app only processes data 'when needed' by an end user - and requires no database. It is ideal when you have large amounts of data - as you only process the data you need to visualize. 16 | 17 | The [CANedge InfluxDB Writer](https://github.com/CSS-Electronics/canedge-influxdb-writer) processes data in advance (e.g. periodically or on-file-upload) and writes the decoded data to a database. It is ideal if dashboard loading speed is critical - but with the downside that data is processed/stored even if it is not used. 18 | 19 | For details incl. 'pros & cons', see our [intro to telematics dashboards](https://www.csselectronics.com/pages/telematics-dashboard-open-source). 20 | 21 | ---- 22 | 23 | ## Features 24 | ``` 25 | - easily load MF4 log files from local disk or S3 server 26 | - fetch data from hardcoded time period - or automate with dynamic periods 27 | - DBC-decode data and optionally extract specific signals 28 | - optionally resample data to specific frequency 29 | - optionally process multi-frame CAN data (ISO TP), incl. 
UDS, J1939, NMEA 2000 30 | - write the data to your own InfluxDB time series database 31 | ``` 32 | ---- 33 | 34 | ## Installation 35 | 36 | In this section we detail how to deploy the app on a PC. 37 | 38 | Note: We recommend testing the deployment with our sample data as a first step. 39 | 40 | ---- 41 | 42 | ### 1: Deploy the integration locally on your PC 43 | 44 | #### Install dependencies 45 | 46 | - Install Python 3.9.13 for Windows ([32 bit](https://www.python.org/ftp/python/3.9.13/python-3.9.13.exe)/[64 bit](https://www.python.org/ftp/python/3.9.13/python-3.9.13-amd64.exe)) or [Linux](https://www.python.org/downloads/release/python-3913/) (_enable 'Add to PATH'_) 47 | - Download this project as a zip via the green button and unzip it 48 | - Open the folder with the `requirements.txt` file 49 | - Open `inputs.py` with a text editor and add your InfluxDB Cloud details 50 | - Double-click `install.bat` (Windows) or open your [command prompt](https://www.youtube.com/watch?v=bgSSJQolR0E&t=47s) and enter the below: 51 | 52 | ##### Windows 53 | ``` 54 | python -m venv env & env\Scripts\activate & pip install -r requirements.txt 55 | ``` 56 | 57 | ##### Linux 58 | ``` 59 | python3 -m venv env && source env/bin/activate && pip install -r requirements.txt 60 | ``` 61 | 62 | #### Run script to write sample data to InfluxDB Cloud 63 | 64 | - Double-click `main.bat` (Windows) or open your command prompt and enter the below: 65 | 66 | ##### Windows 67 | ``` 68 | env\Scripts\activate & python main.py 69 | ``` 70 | 71 | ##### Linux 72 | ``` 73 | source env/bin/activate && python3 main.py 74 | ``` 75 | 76 | #### Set up Grafana Cloud 77 | 78 | - In `Configuration/Plugins` install `TrackMap` 79 | - In `Dashboards/Browse` click `Import` and load the `dashboard-template-sample-data.json` from this repo 80 | - Use the date/time browser to display the last 7 days 81 | 82 | You should now see the sample data visualized in Grafana. 83 | 84 | Note: To activate your virtual environment use `env\Scripts\activate` (Linux: `source env/bin/activate`) 85 | 86 | ---- 87 | 88 | ### 2: Load your own data & DBC files 89 | 90 | #### Load from local disk 91 | - Replace the sample `LOG/` folder with your own `LOG/` folder 92 | - Verify that your data is structured as on the CANedge SD card, i.e. `[device_id]/[session]/[split].MF4` 93 | - Add your DBC file(s) to the `dbc_files` folder 94 | - Update `devices` and `dbc_paths` in `inputs.py` to reflect your added log and DBC files 95 | - Set `days_offset = None` to ensure your data is written at the correct date 96 | - Run the script via `main.bat` 97 | 98 | Note: If you're using the free InfluxDB Cloud, there is a limit of 200 unique signals per device - make sure to add a filtered list of signals in `inputs.py` or modify your DBC file to contain fewer signals than the limit. 99 | 100 | 101 | #### Load from S3 102 | - Add your DBC file(s) to the `dbc_files` folder 103 | - Update `dbc_paths` in `inputs.py` to reflect your added log and DBC files 104 | - Update `devices` in `inputs.py` to reflect your S3 structure, i.e. `["your_bucket/device_id"]` 105 | - Set `days_offset = None` to ensure your data is written at the correct date 106 | - Update the S3 details in `inputs.py` with your S3 server and set `s3 = True` 107 | - Run the script via `main.bat` 108 | 109 | Note: You may want to modify other variables like adding signal filters, changing the resampling frequency or modifying the default start date - see the `inputs.py` sketch below.
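To make the above concrete, below is a minimal, hypothetical sketch of what the relevant part of `inputs.py` could look like when loading data from S3. Only `devices`, `dbc_paths`, `days_offset` and `s3` are named above; the credential variables (`key`, `secret`, `endpoint`) follow those referenced by the deployment scripts in `deploy-aws-lambda/`, while the InfluxDB/signal-filter entries are placeholders - check the comments in your own `inputs.py` for the exact variable names and syntax.

```
# hypothetical inputs.py excerpt - names not mentioned in this README are placeholders

# DBC files and devices; with s3 = True, devices use the "bucket/device_id" form
dbc_paths = ["dbc_files/canmod-gps.dbc"]
devices = ["your_bucket/2F6913DB"]

# set to None so data is written at its original date (no timestamp offsetting)
days_offset = None

# S3 server details (set s3 = False to load from the local LOG/ folder instead)
s3 = True
key = "your_s3_access_key"
secret = "your_s3_secret_key"
endpoint = "http://s3.eu-central-1.amazonaws.com"

# optional (placeholder names): filter signals and resample before writing to InfluxDB
signals = ["Speed", "Latitude", "Longitude", "DistanceTrip"]
res = "1S"
```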
110 | 111 | 112 | #### Import simplified dashboard template 113 | - In `Dashboards/Browse` click `Import` and load the `dashboard-template-simple.json` from this repo 114 | - Select a time period and signal with data to verify that your own data displays as expected 115 | - After this, you can optionally start customizing your panels as explained in the CANedge Intro 116 | 117 | ---- 118 | 119 | ### 3: Automate & scale (e.g. via AWS Lambda) 120 | 121 | Once you've verified that your data is uploaded correctly, you can move on to automating it. See the [CANedge intro](https://canlogger.csselectronics.com/canedge-getting-started/ce2/log-file-tools/browser-dashboards) for details. 122 | 123 | We recommend to see the README in `deploy-aws-lambda/` of this repo for automation via AWS Lambda functions. 124 | 125 | ---- 126 | 127 | ## Other information 128 | 129 | #### Delete data from InfluxDB 130 | If you need to delete data in InfluxDB that you e.g. uploaded as part of a test, you can use the `delete_influx(name)` function from the `SetupInflux` class. Call it by parsing the name of the 'measurement' to delete (i.e. the device ID): `influx.delete_influx("958D2219")` 131 | 132 | #### Multi-frame data (ISO TP) 133 | You can easily process multi-frame data by setting the `tp_type` variable to `"j1939"`, `"uds"` or `"nmea"` and adding the relevant DBC file. For example, you can test this for the sample data by adding the DBC `"dbc_files/nissan_uds.dbc"` and setting `tp_type = "uds"`. 134 | 135 | ---- 136 | 137 | #### Regarding InfluxDB and S3 usage costs 138 | Note that if you use the paid InfluxDB cloud and a paid S3 server, we recommend that you monitor usage during your tests early on to ensure that no unexpected cost developments occur. -------------------------------------------------------------------------------- /dashboard-templates/dashboard-template-sample-data.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "target": { 12 | "limit": 100, 13 | "matchAny": false, 14 | "tags": [], 15 | "type": "dashboard" 16 | }, 17 | "type": "dashboard" 18 | } 19 | ] 20 | }, 21 | "editable": true, 22 | "fiscalYearStartMonth": 0, 23 | "graphTooltip": 0, 24 | "id": 22, 25 | "iteration": 1649182640968, 26 | "links": [], 27 | "liveNow": false, 28 | "panels": [ 29 | { 30 | "aliasColors": { 31 | "AccelerationY": "#3d85c6", 32 | "AngularRateY": "semi-dark-blue" 33 | }, 34 | "bars": false, 35 | "dashLength": 10, 36 | "dashes": false, 37 | "description": "", 38 | "fill": 1, 39 | "fillGradient": 0, 40 | "gridPos": { 41 | "h": 10, 42 | "w": 14, 43 | "x": 0, 44 | "y": 0 45 | }, 46 | "hiddenSeries": false, 47 | "id": 2, 48 | "legend": { 49 | "avg": false, 50 | "current": false, 51 | "max": false, 52 | "min": false, 53 | "show": true, 54 | "total": false, 55 | "values": false 56 | }, 57 | "lines": true, 58 | "linewidth": 1, 59 | "nullPointMode": "null", 60 | "options": { 61 | "alertThreshold": true 62 | }, 63 | "percentage": false, 64 | "pluginVersion": "8.4.5-56740", 65 | "pointradius": 0.5, 66 | "points": true, 67 | "renderer": "flot", 68 | "seriesOverrides": [ 69 | { 70 | "alias": "AccelerationZ", 71 | "yaxis": 1 72 | } 73 | ], 74 | "spaceLength": 10, 75 | "stack": false, 76 | "steppedLine": false, 77 | "targets": [ 78 | { 79 | "datasource": { 80 | "type": 
"influxdb" 81 | }, 82 | "query": "from(bucket: v.defaultBucket)\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\r\n |> filter(fn: (r) => r[\"_measurement\"] =~ /${DEVICE:regex}/)\r\n |> filter(fn: (r) => r[\"_field\"] =~ /${SIGNAL:regex}/)\r\n |> yield(name: \"mean\")", 83 | "refId": "A" 84 | } 85 | ], 86 | "thresholds": [], 87 | "timeRegions": [], 88 | "title": "${SIGNAL}", 89 | "tooltip": { 90 | "shared": true, 91 | "sort": 0, 92 | "value_type": "individual" 93 | }, 94 | "type": "graph", 95 | "xaxis": { 96 | "mode": "time", 97 | "show": true, 98 | "values": [] 99 | }, 100 | "yaxes": [ 101 | { 102 | "format": "short", 103 | "logBase": 1, 104 | "show": true 105 | }, 106 | { 107 | "format": "short", 108 | "logBase": 1, 109 | "show": true 110 | } 111 | ], 112 | "yaxis": { 113 | "align": false 114 | } 115 | }, 116 | { 117 | "autoZoom": true, 118 | "defaultLayer": "OpenStreetMap", 119 | "description": "", 120 | "gridPos": { 121 | "h": 10, 122 | "w": 10, 123 | "x": 14, 124 | "y": 0 125 | }, 126 | "id": 3, 127 | "lineColor": "#3274D9", 128 | "maxDataPoints": 500, 129 | "pointColor": "#FF780A", 130 | "scrollWheelZoom": false, 131 | "showLayerChanger": true, 132 | "targets": [ 133 | { 134 | "datasource": { 135 | "type": "influxdb" 136 | }, 137 | "query": "from(bucket: v.defaultBucket)\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> aggregateWindow(every: v.windowPeriod, fn: median)\r\n |> filter(fn: (r) => r[\"_measurement\"] =~ /${DEVICE:regex}/)\r\n |> filter(fn: (r) => r[\"_field\"] == \"Latitude\" or r[\"_field\"] == \"Longitude\")\r\n |> yield(name: \"median\")", 138 | "refId": "A" 139 | } 140 | ], 141 | "title": "GPS Position", 142 | "type": "pr0ps-trackmap-panel" 143 | }, 144 | { 145 | "aliasColors": { 146 | "Speed": "dark-blue" 147 | }, 148 | "bars": false, 149 | "dashLength": 10, 150 | "dashes": false, 151 | "description": "", 152 | "fill": 2, 153 | "fillGradient": 2, 154 | "gridPos": { 155 | "h": 6, 156 | "w": 4, 157 | "x": 0, 158 | "y": 10 159 | }, 160 | "hiddenSeries": false, 161 | "id": 4, 162 | "legend": { 163 | "avg": false, 164 | "current": false, 165 | "max": false, 166 | "min": false, 167 | "show": true, 168 | "total": false, 169 | "values": false 170 | }, 171 | "lines": true, 172 | "linewidth": 1, 173 | "maxDataPoints": 500, 174 | "nullPointMode": "null", 175 | "options": { 176 | "alertThreshold": true 177 | }, 178 | "percentage": false, 179 | "pluginVersion": "8.4.5-56740", 180 | "pointradius": 0.5, 181 | "points": false, 182 | "renderer": "flot", 183 | "seriesOverrides": [], 184 | "spaceLength": 10, 185 | "stack": false, 186 | "steppedLine": false, 187 | "targets": [ 188 | { 189 | "datasource": { 190 | "type": "influxdb" 191 | }, 192 | "query": "from(bucket: v.defaultBucket)\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\r\n |> filter(fn: (r) => r[\"_measurement\"] =~ /${DEVICE:regex}/)\r\n |> filter(fn: (r) => r[\"_field\"] == \"Speed\")\r\n |> yield(name: \"mean\")", 193 | "refId": "A" 194 | } 195 | ], 196 | "thresholds": [], 197 | "timeRegions": [], 198 | "title": "Speed (m/s)", 199 | "tooltip": { 200 | "shared": true, 201 | "sort": 0, 202 | "value_type": "individual" 203 | }, 204 | "type": "graph", 205 | "xaxis": { 206 | "mode": "time", 207 | "show": true, 208 | "values": [] 209 | }, 210 | "yaxes": [ 211 | { 212 | "format": "short", 213 | "logBase": 1, 214 | "show": true 215 | }, 216 | { 217 | "format": "short", 218 
| "logBase": 1, 219 | "show": true 220 | } 221 | ], 222 | "yaxis": { 223 | "align": false 224 | } 225 | }, 226 | { 227 | "aliasColors": { 228 | "DistanceAccuracy": "#c3c1c1", 229 | "DistanceTrip": "orange", 230 | "Speed": "dark-blue" 231 | }, 232 | "bars": false, 233 | "dashLength": 10, 234 | "dashes": false, 235 | "description": "", 236 | "fill": 2, 237 | "fillGradient": 2, 238 | "gridPos": { 239 | "h": 6, 240 | "w": 4, 241 | "x": 4, 242 | "y": 10 243 | }, 244 | "hiddenSeries": false, 245 | "id": 5, 246 | "legend": { 247 | "avg": false, 248 | "current": false, 249 | "max": false, 250 | "min": false, 251 | "show": true, 252 | "total": false, 253 | "values": false 254 | }, 255 | "lines": true, 256 | "linewidth": 1, 257 | "maxDataPoints": 500, 258 | "nullPointMode": "null", 259 | "options": { 260 | "alertThreshold": true 261 | }, 262 | "percentage": false, 263 | "pluginVersion": "8.4.5-56740", 264 | "pointradius": 0.5, 265 | "points": false, 266 | "renderer": "flot", 267 | "seriesOverrides": [ 268 | { 269 | "alias": "DistanceAccuracy", 270 | "yaxis": 2 271 | } 272 | ], 273 | "spaceLength": 10, 274 | "stack": false, 275 | "steppedLine": false, 276 | "targets": [ 277 | { 278 | "datasource": { 279 | "type": "influxdb" 280 | }, 281 | "query": "from(bucket: v.defaultBucket)\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\r\n |> filter(fn: (r) => r[\"_measurement\"] =~ /${DEVICE:regex}/)\r\n |> filter(fn: (r) => r[\"_field\"] == \"DistanceTrip\" or r[\"_field\"] == \"DistanceAccuracy\")\r\n |> yield(name: \"mean\")", 282 | "refId": "A" 283 | } 284 | ], 285 | "thresholds": [], 286 | "timeRegions": [], 287 | "title": "Trip distance (m)", 288 | "tooltip": { 289 | "shared": true, 290 | "sort": 0, 291 | "value_type": "individual" 292 | }, 293 | "type": "graph", 294 | "xaxis": { 295 | "mode": "time", 296 | "show": true, 297 | "values": [] 298 | }, 299 | "yaxes": [ 300 | { 301 | "format": "short", 302 | "logBase": 1, 303 | "show": true 304 | }, 305 | { 306 | "format": "short", 307 | "logBase": 1, 308 | "show": true 309 | } 310 | ], 311 | "yaxis": { 312 | "align": false 313 | } 314 | }, 315 | { 316 | "aliasColors": { 317 | "Heading": "blue", 318 | "Pitch": "green", 319 | "Roll": "orange", 320 | "Speed": "dark-blue" 321 | }, 322 | "bars": false, 323 | "dashLength": 10, 324 | "dashes": false, 325 | "description": "", 326 | "fill": 0, 327 | "fillGradient": 2, 328 | "gridPos": { 329 | "h": 6, 330 | "w": 6, 331 | "x": 8, 332 | "y": 10 333 | }, 334 | "hiddenSeries": false, 335 | "id": 6, 336 | "legend": { 337 | "avg": false, 338 | "current": false, 339 | "max": false, 340 | "min": false, 341 | "show": true, 342 | "total": false, 343 | "values": false 344 | }, 345 | "lines": true, 346 | "linewidth": 1, 347 | "maxDataPoints": 500, 348 | "nullPointMode": "null", 349 | "options": { 350 | "alertThreshold": true 351 | }, 352 | "percentage": false, 353 | "pluginVersion": "8.4.5-56740", 354 | "pointradius": 0.5, 355 | "points": false, 356 | "renderer": "flot", 357 | "seriesOverrides": [ 358 | { 359 | "alias": "Roll", 360 | "yaxis": 1 361 | }, 362 | { 363 | "alias": "Pitch", 364 | "yaxis": 1 365 | }, 366 | { 367 | "alias": "Heading", 368 | "yaxis": 2 369 | } 370 | ], 371 | "spaceLength": 10, 372 | "stack": false, 373 | "steppedLine": false, 374 | "targets": [ 375 | { 376 | "datasource": { 377 | "type": "influxdb" 378 | }, 379 | "query": "from(bucket: v.defaultBucket)\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> 
aggregateWindow(every: v.windowPeriod, fn: mean)\r\n |> filter(fn: (r) => r[\"_measurement\"] =~ /${DEVICE:regex}/)\r\n |> filter(fn: (r) => r[\"_field\"] == \"Roll\" or r[\"_field\"] == \"Pitch\" or r[\"_field\"] == \"Heading\" )\r\n |> yield(name: \"mean\")", 380 | "refId": "A" 381 | } 382 | ], 383 | "thresholds": [], 384 | "timeRegions": [], 385 | "title": "Attitude", 386 | "tooltip": { 387 | "shared": true, 388 | "sort": 0, 389 | "value_type": "individual" 390 | }, 391 | "type": "graph", 392 | "xaxis": { 393 | "mode": "time", 394 | "show": true, 395 | "values": [] 396 | }, 397 | "yaxes": [ 398 | { 399 | "format": "short", 400 | "logBase": 1, 401 | "show": true 402 | }, 403 | { 404 | "format": "short", 405 | "logBase": 1, 406 | "show": true 407 | } 408 | ], 409 | "yaxis": { 410 | "align": false 411 | } 412 | }, 413 | { 414 | "description": "", 415 | "fieldConfig": { 416 | "defaults": { 417 | "color": { 418 | "mode": "thresholds" 419 | }, 420 | "decimals": 1, 421 | "mappings": [], 422 | "thresholds": { 423 | "mode": "absolute", 424 | "steps": [ 425 | { 426 | "color": "blue", 427 | "value": null 428 | } 429 | ] 430 | } 431 | }, 432 | "overrides": [] 433 | }, 434 | "gridPos": { 435 | "h": 6, 436 | "w": 3, 437 | "x": 14, 438 | "y": 10 439 | }, 440 | "id": 7, 441 | "maxDataPoints": 500, 442 | "options": { 443 | "colorMode": "background", 444 | "graphMode": "none", 445 | "justifyMode": "auto", 446 | "orientation": "auto", 447 | "reduceOptions": { 448 | "calcs": [ 449 | "mean" 450 | ], 451 | "fields": "", 452 | "values": false 453 | }, 454 | "textMode": "auto" 455 | }, 456 | "pluginVersion": "8.4.5-56740", 457 | "targets": [ 458 | { 459 | "datasource": { 460 | "type": "influxdb" 461 | }, 462 | "query": "from(bucket: v.defaultBucket)\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\r\n |> filter(fn: (r) => r[\"_measurement\"] =~ /${DEVICE:regex}/)\r\n |> filter(fn: (r) => r[\"_field\"] == \"Satellites\")\r\n |> yield(name: \"mean\")", 463 | "refId": "A" 464 | } 465 | ], 466 | "title": "#Satellites (avg)", 467 | "type": "stat" 468 | }, 469 | { 470 | "description": "", 471 | "fieldConfig": { 472 | "defaults": { 473 | "color": { 474 | "mode": "thresholds" 475 | }, 476 | "decimals": 0, 477 | "mappings": [], 478 | "thresholds": { 479 | "mode": "absolute", 480 | "steps": [ 481 | { 482 | "color": "blue", 483 | "value": null 484 | } 485 | ] 486 | } 487 | }, 488 | "overrides": [] 489 | }, 490 | "gridPos": { 491 | "h": 6, 492 | "w": 3, 493 | "x": 17, 494 | "y": 10 495 | }, 496 | "id": 8, 497 | "maxDataPoints": 500, 498 | "options": { 499 | "colorMode": "background", 500 | "graphMode": "none", 501 | "justifyMode": "auto", 502 | "orientation": "auto", 503 | "reduceOptions": { 504 | "calcs": [ 505 | "mean" 506 | ], 507 | "fields": "", 508 | "values": false 509 | }, 510 | "textMode": "auto" 511 | }, 512 | "pluginVersion": "8.4.5-56740", 513 | "targets": [ 514 | { 515 | "datasource": { 516 | "type": "influxdb" 517 | }, 518 | "query": "from(bucket: v.defaultBucket)\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\r\n |> filter(fn: (r) => r[\"_measurement\"] =~ /${DEVICE:regex}/)\r\n |> filter(fn: (r) => r[\"_field\"] == \"DistanceTotal\")\r\n |> yield(name: \"mean\")", 519 | "refId": "A" 520 | } 521 | ], 522 | "title": "Distance (km)", 523 | "type": "stat" 524 | }, 525 | { 526 | "description": "", 527 | "fieldConfig": { 528 | "defaults": { 529 | "color": { 530 | "mode": 
"thresholds" 531 | }, 532 | "decimals": 0, 533 | "mappings": [], 534 | "thresholds": { 535 | "mode": "absolute", 536 | "steps": [ 537 | { 538 | "color": "green", 539 | "value": null 540 | }, 541 | { 542 | "color": "#EAB839", 543 | "value": 1 544 | } 545 | ] 546 | } 547 | }, 548 | "overrides": [] 549 | }, 550 | "gridPos": { 551 | "h": 6, 552 | "w": 4, 553 | "x": 20, 554 | "y": 10 555 | }, 556 | "id": 9, 557 | "maxDataPoints": 500, 558 | "options": { 559 | "colorMode": "background", 560 | "graphMode": "none", 561 | "justifyMode": "auto", 562 | "orientation": "auto", 563 | "reduceOptions": { 564 | "calcs": [ 565 | "mean" 566 | ], 567 | "fields": "", 568 | "values": false 569 | }, 570 | "textMode": "auto" 571 | }, 572 | "pluginVersion": "8.4.5-56740", 573 | "targets": [ 574 | { 575 | "datasource": { 576 | "type": "influxdb" 577 | }, 578 | "query": "from(bucket: v.defaultBucket)\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\r\n |> filter(fn: (r) => r[\"_measurement\"] =~ /${DEVICE:regex}/)\r\n |> filter(fn: (r) => r[\"_field\"] == \"Fence1\" or r[\"_field\"] == \"Fence2\" or r[\"_field\"] == \"Fence3\" or r[\"_field\"] == \"Fence4\")\r\n |> yield(name: \"mean\")", 579 | "refId": "A" 580 | } 581 | ], 582 | "title": "Geofence status", 583 | "type": "stat" 584 | } 585 | ], 586 | "refresh": false, 587 | "schemaVersion": 35, 588 | "style": "dark", 589 | "tags": [], 590 | "templating": { 591 | "list": [ 592 | { 593 | "current": { 594 | "selected": false, 595 | "text": "2F6913DB", 596 | "value": "2F6913DB" 597 | }, 598 | "definition": "import \"influxdata/influxdb/schema\"\r\nschema.measurements(bucket: v.defaultBucket)", 599 | "hide": 0, 600 | "includeAll": false, 601 | "label": "DEVICE", 602 | "multi": false, 603 | "name": "DEVICE", 604 | "options": [], 605 | "query": "import \"influxdata/influxdb/schema\"\r\nschema.measurements(bucket: v.defaultBucket)", 606 | "refresh": 1, 607 | "regex": "", 608 | "skipUrlSync": false, 609 | "sort": 0, 610 | "type": "query" 611 | }, 612 | { 613 | "current": { 614 | "selected": true, 615 | "text": [ 616 | "AccelerationX", 617 | "AccelerationY" 618 | ], 619 | "value": [ 620 | "AccelerationX", 621 | "AccelerationY" 622 | ] 623 | }, 624 | "definition": "import \"influxdata/influxdb/schema\"\r\n\r\nschema.fieldKeys(bucket: v.defaultBucket)", 625 | "hide": 0, 626 | "includeAll": true, 627 | "label": "SIGNAL", 628 | "multi": true, 629 | "name": "SIGNAL", 630 | "options": [], 631 | "query": "import \"influxdata/influxdb/schema\"\r\n\r\nschema.fieldKeys(bucket: v.defaultBucket)", 632 | "refresh": 1, 633 | "regex": "", 634 | "skipUrlSync": false, 635 | "sort": 0, 636 | "type": "query" 637 | } 638 | ] 639 | }, 640 | "time": { 641 | "from": "now-1680m", 642 | "to": "now-1500m" 643 | }, 644 | "timepicker": {}, 645 | "timezone": "", 646 | "title": "CANedge InfluxDB Writer - Template", 647 | "uid": "nCt68ey7z", 648 | "version": 13, 649 | "weekStart": "" 650 | } -------------------------------------------------------------------------------- /dashboard-templates/dashboard-template-simple.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "target": { 12 | "limit": 100, 13 | "matchAny": false, 14 | "tags": [], 15 | "type": "dashboard" 16 | }, 17 | "type": 
"dashboard" 18 | } 19 | ] 20 | }, 21 | "editable": true, 22 | "fiscalYearStartMonth": 0, 23 | "graphTooltip": 0, 24 | "id": 24, 25 | "iteration": 1649183711982, 26 | "links": [], 27 | "liveNow": false, 28 | "panels": [ 29 | { 30 | "aliasColors": { 31 | "AccelerationY": "#3d85c6", 32 | "AngularRateY": "semi-dark-blue" 33 | }, 34 | "bars": false, 35 | "dashLength": 10, 36 | "dashes": false, 37 | "datasource": { 38 | "type": "influxdb" 39 | }, 40 | "description": "", 41 | "fill": 1, 42 | "fillGradient": 0, 43 | "gridPos": { 44 | "h": 16, 45 | "w": 24, 46 | "x": 0, 47 | "y": 0 48 | }, 49 | "hiddenSeries": false, 50 | "id": 2, 51 | "legend": { 52 | "avg": false, 53 | "current": false, 54 | "max": false, 55 | "min": false, 56 | "show": true, 57 | "total": false, 58 | "values": false 59 | }, 60 | "lines": true, 61 | "linewidth": 1, 62 | "nullPointMode": "null", 63 | "options": { 64 | "alertThreshold": true 65 | }, 66 | "percentage": false, 67 | "pluginVersion": "8.4.5-56740", 68 | "pointradius": 0.5, 69 | "points": true, 70 | "renderer": "flot", 71 | "seriesOverrides": [], 72 | "spaceLength": 10, 73 | "stack": false, 74 | "steppedLine": false, 75 | "targets": [ 76 | { 77 | "datasource": { 78 | "type": "influxdb" 79 | }, 80 | "query": "from(bucket: v.defaultBucket)\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\r\n |> filter(fn: (r) => r[\"_measurement\"] =~ /${DEVICE:regex}/)\r\n |> filter(fn: (r) => r[\"_field\"] =~ /${SIGNAL:regex}/)\r\n |> yield(name: \"mean\")", 81 | "refId": "A" 82 | } 83 | ], 84 | "thresholds": [], 85 | "timeRegions": [], 86 | "title": "${SIGNAL}", 87 | "tooltip": { 88 | "shared": true, 89 | "sort": 0, 90 | "value_type": "individual" 91 | }, 92 | "type": "graph", 93 | "xaxis": { 94 | "mode": "time", 95 | "show": true, 96 | "values": [] 97 | }, 98 | "yaxes": [ 99 | { 100 | "format": "short", 101 | "logBase": 1, 102 | "show": true 103 | }, 104 | { 105 | "format": "short", 106 | "logBase": 1, 107 | "show": true 108 | } 109 | ], 110 | "yaxis": { 111 | "align": false 112 | } 113 | } 114 | ], 115 | "refresh": false, 116 | "schemaVersion": 35, 117 | "style": "dark", 118 | "tags": [], 119 | "templating": { 120 | "list": [ 121 | { 122 | "current": { 123 | "selected": true 124 | }, 125 | "definition": "import \"influxdata/influxdb/schema\"\r\nschema.measurements(bucket: v.defaultBucket)", 126 | "hide": 0, 127 | "includeAll": false, 128 | "label": "DEVICE", 129 | "multi": false, 130 | "name": "DEVICE", 131 | "options": [], 132 | "query": "import \"influxdata/influxdb/schema\"\r\nschema.measurements(bucket: v.defaultBucket)", 133 | "refresh": 1, 134 | "regex": "", 135 | "skipUrlSync": false, 136 | "sort": 0, 137 | "type": "query" 138 | }, 139 | { 140 | "current": { 141 | "selected": true 142 | }, 143 | "definition": "import \"influxdata/influxdb/schema\"\r\n\r\nschema.fieldKeys(bucket: v.defaultBucket)", 144 | "hide": 0, 145 | "includeAll": false, 146 | "label": "SIGNAL", 147 | "multi": true, 148 | "name": "SIGNAL", 149 | "options": [], 150 | "query": "import \"influxdata/influxdb/schema\"\r\n\r\nschema.fieldKeys(bucket: v.defaultBucket)", 151 | "refresh": 1, 152 | "regex": "", 153 | "skipUrlSync": false, 154 | "sort": 0, 155 | "type": "query" 156 | } 157 | ] 158 | }, 159 | "time": { 160 | "from": "now-2d", 161 | "to": "now" 162 | }, 163 | "timepicker": {}, 164 | "timezone": "", 165 | "title": "CANedge InfluxDB Writer - Simple", 166 | "uid": "nCt68ey7z4", 167 | "version": 2, 168 | "weekStart": "" 169 | } 
-------------------------------------------------------------------------------- /dbc_files/canmod-gps.dbc: -------------------------------------------------------------------------------- 1 | VERSION "" 2 | 3 | 4 | NS_ : 5 | NS_DESC_ 6 | CM_ 7 | BA_DEF_ 8 | BA_ 9 | VAL_ 10 | CAT_DEF_ 11 | CAT_ 12 | FILTER 13 | BA_DEF_DEF_ 14 | EV_DATA_ 15 | ENVVAR_DATA_ 16 | SGTYPE_ 17 | SGTYPE_VAL_ 18 | BA_DEF_SGTYPE_ 19 | BA_SGTYPE_ 20 | SIG_TYPE_REF_ 21 | VAL_TABLE_ 22 | SIG_GROUP_ 23 | SIG_VALTYPE_ 24 | SIGTYPE_VALTYPE_ 25 | BO_TX_BU_ 26 | BA_DEF_REL_ 27 | BA_REL_ 28 | BA_DEF_DEF_REL_ 29 | BU_SG_REL_ 30 | BU_EV_REL_ 31 | BU_BO_REL_ 32 | SG_MUL_VAL_ 33 | 34 | BS_: 35 | 36 | BU_: 37 | 38 | 39 | BO_ 3 gnss_pos: 8 Vector__XXX 40 | SG_ PositionAccuracy : 58|6@1+ (1,0) [0|63] "m" Vector__XXX 41 | SG_ Latitude : 1|28@1+ (1e-06,-90) [-90|90] "deg" Vector__XXX 42 | SG_ Longitude : 29|29@1+ (1e-06,-180) [-180|180] "deg" Vector__XXX 43 | SG_ PositionValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX 44 | 45 | BO_ 2 gnss_time: 6 Vector__XXX 46 | SG_ TimeValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX 47 | SG_ TimeConfirmed : 1|1@1+ (1,0) [0|1] "" Vector__XXX 48 | SG_ Epoch : 8|40@1+ (0.001,1577840400) [1577840400|2677352027] "sec" Vector__XXX 49 | 50 | BO_ 5 gnss_attitude: 8 Vector__XXX 51 | SG_ AttitudeValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX 52 | SG_ Roll : 1|12@1+ (0.1,-180) [-180|180] "deg" Vector__XXX 53 | SG_ Pitch : 22|12@1+ (0.1,-90) [-90|90] "deg" Vector__XXX 54 | SG_ Heading : 43|12@1+ (0.1,0) [0|360] "deg" Vector__XXX 55 | SG_ RollAccuracy : 13|9@1+ (0.1,0) [0|50] "deg" Vector__XXX 56 | SG_ PitchAccuracy : 34|9@1+ (0.1,0) [0|50] "deg" Vector__XXX 57 | SG_ HeadingAccuracy : 55|9@1+ (0.1,0) [0|50] "deg" Vector__XXX 58 | 59 | BO_ 6 gnss_odo: 8 Vector__XXX 60 | SG_ DistanceTrip : 1|22@1+ (1,0) [0|4194303] "m" Vector__XXX 61 | SG_ DistanceAccuracy : 23|19@1+ (1,0) [0|524287] "m" Vector__XXX 62 | SG_ DistanceValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX 63 | SG_ DistanceTotal : 42|22@1+ (1,0) [0|4194303] "km" Vector__XXX 64 | 65 | BO_ 1 gnss_status: 1 Vector__XXX 66 | SG_ FixType : 0|3@1+ (1,0) [0|5] "" Vector__XXX 67 | SG_ Satellites : 3|5@1+ (1,0) [0|31] "" Vector__XXX 68 | 69 | BO_ 4 gnss_altitude: 4 Vector__XXX 70 | SG_ AltitudeValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX 71 | SG_ Altitude : 1|18@1+ (0.1,-6000) [-6000|20000] "m" Vector__XXX 72 | SG_ AltitudeAccuracy : 19|13@1+ (1,0) [0|8000] "m" Vector__XXX 73 | 74 | BO_ 8 gnss_geofence: 2 Vector__XXX 75 | SG_ FenceValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX 76 | SG_ FenceCombined : 1|2@1+ (1,0) [0|1] "" Vector__XXX 77 | SG_ Fence1 : 8|2@1+ (1,0) [0|1] "" Vector__XXX 78 | SG_ Fence2 : 10|2@1+ (1,0) [0|1] "" Vector__XXX 79 | SG_ Fence3 : 12|2@1+ (1,0) [0|1] "" Vector__XXX 80 | SG_ Fence4 : 14|2@1+ (1,0) [0|1] "" Vector__XXX 81 | 82 | BO_ 7 gnss_speed: 5 Vector__XXX 83 | SG_ Speed : 1|20@1+ (0.001,0) [0|1048.575] "m/s" Vector__XXX 84 | SG_ SpeedAccuracy : 21|19@1+ (0.001,0) [0|524.287] "m/s" Vector__XXX 85 | SG_ SpeedValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX 86 | 87 | BO_ 9 gnss_imu: 8 Vector__XXX 88 | SG_ AccelerationX : 1|10@1+ (0.125,-64) [-64|63.875] "m/s^2" Vector__XXX 89 | SG_ AccelerationY : 11|10@1+ (0.125,-64) [-64|63.875] "m/s^2" Vector__XXX 90 | SG_ AccelerationZ : 21|10@1+ (0.125,-64) [-64|63.875] "m/s^2" Vector__XXX 91 | SG_ AngularRateX : 31|11@1+ (0.25,-256) [-256|255.75] "deg/s" Vector__XXX 92 | SG_ AngularRateY : 42|11@1+ (0.25,-256) [-256|255.75] "deg/s" Vector__XXX 93 | SG_ AngularRateZ : 53|11@1+ (0.25,-256) [-256|255.75] "deg/s" Vector__XXX 94 | SG_ ImuValid 
: 0|1@1+ (1,0) [0|1] "" Vector__XXX 95 | 96 | 97 | 98 | CM_ BO_ 3 "GNSS position"; 99 | CM_ SG_ 3 PositionAccuracy "Accuracy of position"; 100 | CM_ SG_ 3 Latitude "Latitude"; 101 | CM_ SG_ 3 Longitude "Longitude"; 102 | CM_ SG_ 3 PositionValid "Position validity"; 103 | CM_ BO_ 2 "GNSS time"; 104 | CM_ SG_ 2 TimeValid "Time validity"; 105 | CM_ SG_ 2 TimeConfirmed "Time confirmed"; 106 | CM_ SG_ 2 Epoch "Epoch time"; 107 | CM_ BO_ 5 "GNSS attitude"; 108 | CM_ SG_ 5 AttitudeValid "Attitude validity"; 109 | CM_ SG_ 5 Roll "Vehicle roll"; 110 | CM_ SG_ 5 Pitch "Vehicle pitch"; 111 | CM_ SG_ 5 Heading "Vehicle heading"; 112 | CM_ SG_ 5 RollAccuracy "Vehicle roll accuracy"; 113 | CM_ SG_ 5 PitchAccuracy "Vehicle pitch accuracy"; 114 | CM_ SG_ 5 HeadingAccuracy "Vehicle heading accuracy"; 115 | CM_ BO_ 6 "GNSS odometer"; 116 | CM_ SG_ 6 DistanceTrip "Distance traveled since last reset"; 117 | CM_ SG_ 6 DistanceAccuracy "Distance accuracy (1-sigma)"; 118 | CM_ SG_ 6 DistanceTotal "Distance traveled in total"; 119 | CM_ BO_ 1 "GNSS information"; 120 | CM_ SG_ 1 FixType "Fix type"; 121 | CM_ SG_ 1 Satellites "Number of satellites used"; 122 | CM_ BO_ 4 "GNSS altitude"; 123 | CM_ SG_ 4 AltitudeValid "Altitude validity"; 124 | CM_ SG_ 4 Altitude "Altitude"; 125 | CM_ SG_ 4 AltitudeAccuracy "Accuracy of altitude"; 126 | CM_ BO_ 8 "GNSS geofence(s)"; 127 | CM_ SG_ 8 FenceValid "Geofencing status"; 128 | CM_ SG_ 8 FenceCombined "Combined (logical OR) state of all geofences"; 129 | CM_ SG_ 8 Fence1 "Geofence 1 state"; 130 | CM_ SG_ 8 Fence2 "Geofence 2 state"; 131 | CM_ SG_ 8 Fence3 "Geofence 3 state"; 132 | CM_ SG_ 8 Fence4 "Geofence 4 state"; 133 | CM_ BO_ 7 "GNSS speed"; 134 | CM_ SG_ 7 Speed "Speed"; 135 | CM_ SG_ 7 SpeedAccuracy "Speed accuracy"; 136 | CM_ BO_ 9 "GNSS IMU"; 137 | CM_ SG_ 9 AccelerationX "X acceleration with a resolution of 0.125 m/s^2"; 138 | CM_ SG_ 9 AccelerationY "Y acceleration with a resolution of 0.125 m/s^2"; 139 | CM_ SG_ 9 AccelerationZ "Z acceleration with a resolution of 0.125 m/s^2"; 140 | CM_ SG_ 9 AngularRateX "X angular rate with a resolution of 0.25 deg/s"; 141 | CM_ SG_ 9 AngularRateY "Y angular rate with a resolution of 0.25 deg/s"; 142 | CM_ SG_ 9 AngularRateZ "Z angular rate with a resolution of 0.25 deg/s"; 143 | VAL_ 3 PositionValid 0 "Invalid" 1 "Valid" ; 144 | VAL_ 2 TimeValid 0 "Invalid" 1 "Valid" ; 145 | VAL_ 2 TimeConfirmed 0 "Unconfirmed" 1 "Confirmed" ; 146 | VAL_ 5 AttitudeValid 0 "Invalid" 1 "Valid" ; 147 | VAL_ 6 DistanceValid 0 "Invalid" 1 "Valid" ; 148 | VAL_ 1 FixType 0 "No fix" 1 "Dead reckoning only" 2 "2D-fix" 3 "3D-fix" 4 "GNSS + dead reckoning combined" 5 "Time only fix" ; 149 | VAL_ 4 AltitudeValid 0 "Invalid" 1 "Valid" ; 150 | VAL_ 8 FenceValid 0 "Invalid" 1 "Valid" ; 151 | VAL_ 8 FenceCombined 0 "Unknown" 1 "Inside" 2 "Outside" ; 152 | VAL_ 8 Fence1 0 "Unknown" 1 "Inside" 2 "Outside" ; 153 | VAL_ 8 Fence2 0 "Unknown" 1 "Inside" 2 "Outside" ; 154 | VAL_ 8 Fence3 0 "Unknown" 1 "Inside" 2 "Outside" ; 155 | VAL_ 8 Fence4 0 "Unknown" 1 "Inside" 2 "Outside" ; 156 | VAL_ 7 SpeedValid 0 "Invalid" 1 "Valid" ; 157 | VAL_ 9 ImuValid 0 "Invalid" 1 "Valid" ; 158 | 159 | -------------------------------------------------------------------------------- /dbc_files/nissan_uds.dbc: -------------------------------------------------------------------------------- 1 | VERSION "" 2 | 3 | 4 | NS_ : 5 | NS_DESC_ 6 | CM_ 7 | BA_DEF_ 8 | BA_ 9 | VAL_ 10 | CAT_DEF_ 11 | CAT_ 12 | FILTER 13 | BA_DEF_DEF_ 14 | EV_DATA_ 15 | ENVVAR_DATA_ 16 | SGTYPE_ 17 | SGTYPE_VAL_ 18 
| BA_DEF_SGTYPE_ 19 | BA_SGTYPE_ 20 | SIG_TYPE_REF_ 21 | VAL_TABLE_ 22 | SIG_GROUP_ 23 | SIG_VALTYPE_ 24 | SIGTYPE_VALTYPE_ 25 | BO_TX_BU_ 26 | BA_DEF_REL_ 27 | BA_REL_ 28 | BA_DEF_DEF_REL_ 29 | BU_SG_REL_ 30 | BU_EV_REL_ 31 | BU_BO_REL_ 32 | SG_MUL_VAL_ 33 | 34 | BS_: 35 | 36 | BU_: 37 | 38 | 39 | BO_ 1979 SoC_Temp: 55 Vector__XXX 40 | SG_ SoC m1 : 279|24@0+ (0.0001,0) [0|0] "%" Vector__XXX 41 | SG_ BatPackTemp1 m4 : 40|8@1+ (1,0) [0|0] "%" Vector__XXX 42 | SG_ BatPackTemp2 m4 : 64|8@1+ (1,0) [0|0] "%" Vector__XXX 43 | SG_ BatPackTemp4 m4 : 112|8@1+ (1,0) [0|0] "%" Vector__XXX 44 | SG_ response m97M : 16|8@1+ (1,0) [0|0] "" Vector__XXX 45 | SG_ service M : 8|8@1+ (1,0) [0|0] "" Vector__XXX 46 | 47 | 48 | 49 | BA_DEF_ BO_ "VFrameFormat" ENUM "StandardCAN","ExtendedCAN","StandardCAN_FD","ExtendedCAN_FD","J1939PG"; 50 | BA_DEF_ "ProtocolType" STRING ; 51 | BA_DEF_DEF_ "VFrameFormat" ""; 52 | BA_DEF_DEF_ "ProtocolType" ""; 53 | BA_ "ProtocolType" ""; 54 | 55 | SG_MUL_VAL_ 1979 SoC response 1-1; 56 | SG_MUL_VAL_ 1979 BatPackTemp1 response 4-4; 57 | SG_MUL_VAL_ 1979 BatPackTemp2 response 4-4; 58 | SG_MUL_VAL_ 1979 BatPackTemp4 response 4-4; 59 | SG_MUL_VAL_ 1979 response service 97-97; 60 | 61 | -------------------------------------------------------------------------------- /deploy-aws-lambda/README.md: -------------------------------------------------------------------------------- 1 | # AWS Lambda Automation 2 | 3 | AWS Lambda functions are a smart way to auto-execute code on every log file upload. 4 | 5 | Below we describe how you can set up an AWS Lambda function to automatically run the script when a new log file is uploaded. 6 | 7 | ---- 8 | 9 | ## Before you deploy 10 | 11 | - Test that your InfluxDB/Grafana setup works with the sample data 12 | - Test that your setup works with your own data/server when manually running the script from your PC 13 | - Ensure that the S3 credentials in your `inputs.py` provide admin rights to your AWS account 14 | - Make sure your log file split size, `inputs.py` and InfluxDB account settings are setup as needed (see below) 15 | 16 | ---- 17 | 18 | ## Quick start deployment via Python [recommended] 19 | 20 | Deploy your Lambda function via the steps below (Windows): 21 | 22 | 1. Double-click `install.bat` 23 | 2. Double-click `deploy_aws_lambda.bat` 24 | 25 | Next, test your Lambda function by uploading a log file to your S3 bucket (in the `device_id/session/split.MFX` structure). 26 | 27 | Note: You can use `delete_aws_lambda.bat` to remove the Lambda execution role, Lambda function and S3 event triggers. This is useful if you e.g. need to update your function. Once deleted, you can run `deploy_aws_lambda.bat` again to re-deploy. 28 | 29 | 30 | #### Test the Lambda function 31 | You can log in to your AWS account, go to `Lambda/Functions/canedge-influxdb-writer` and click `Monitor/Logs` to see all invocations and the resulting output. This lets you verify that your uploaded log files are processed as expected. 32 | 33 | ---- 34 | 35 | ### Dependencies, log file size and InfluxDB account type 36 | If you're initially testing your setup with a free InfluxDB Cloud starter account, keep in mind that there is a 'write restriction' of 5 MB per 5 minutes. This means that if you try to write e.g. 30 MB of data in one log file, it will take > 30 minutes. This exceeds the AWS Lambda max timeout. If you're using AWS Lambda, we recommend that you ensure your log file split size is 2-5 MB and that the data you extract is optimized (i.e. 
only push relevant signals at a relevant resampling frequency). 37 | 38 | For 'production setups', we recommend using a paid InfluxDB Cloud or self-hosting InfluxDB if you wish to use AWS Lambda functions. 39 | 40 | ---- 41 | 42 | ## Regarding AWS and Lambda costs 43 | We recommend tracking your AWS billing costs when working with Lambda functions to ensure everything is set up correctly. We do not take responsibility for incorrect implementations or unexpected costs related to implementation of the below. Note also that the below is intended as a 'getting started' guide - not a fully cost-optimized setup. -------------------------------------------------------------------------------- /deploy-aws-lambda/README_manual_deployment.md: -------------------------------------------------------------------------------- 1 | # Manual deployment [not recommended] 2 | 3 | The below steps can be taken to manually deploy your Lambda function via the AWS GUI. However, we strongly recommend the above batch script method to ensure correct installation. 4 | 5 | 1. Create an [IAM execution](https://docs.aws.amazon.com/lambda/latest/dg/lambda-intro-execution-role.html) role (not user) with permissions: `AWSLambdaBasicExecutionRole` + `AmazonS3FullAccess` 6 | 2. Go to 'Services/Lambda', then select your S3 bucket region (upper right corner) 7 | 3. Add a new Lambda function with a name, a Python 3.9 environment and your new execution role 8 | 4. Add a 'Trigger': Select S3, your test bucket, `All object create events` and suffix `.MF4` 9 | 5. Create a zip with `lambda_function.py`, `utils.py`, `utils_db.py`, `inputs.py` and your `*.dbc` (ensure your inputs are updated) 10 | 6. Upload the zip via the Lambda 'Actions' button and confirm that your code shows in the online code editor 11 | 7. Find the pre-built layer ARN for your AWS S3 region in the `lambda_layer_arns.csv` file 12 | 8. In the 'Layers' section, click 'Add a layer/Specify an ARN' and paste the ARN matching your region 13 | 9. Go to 'Configuration/General configuration' and set the 'Timeout' to `3 min` and memory to `1024 MB` (you can tweak these later) 14 | 10. Save the script and click 'Deploy' (important), then 'Test' (using the below test data) and verify that it succeeds 15 | 11. Click 'Test' in the upper right corner 16 | 12. Add the test event JSON content below (update to match details of an actual MF4 test file on S3) 17 | 13. When you're ready, click 'Actions/Publish' to save a new version 18 | 14. In AWS Services, go to CloudWatch/Logs/Log groups and click your Lambda function to monitor events 19 | 15. Download a logfile via CANcloud from your main bucket and upload to your test bucket via CANcloud (from the Home tab) 20 | 16. Verify that the Lambda function is triggered within a minute and check from the log output that it processes the data 21 | 17. Verify that data is written correctly to InfluxDB 22 | 23 | #### Lambda function test event data 24 | 25 | ``` 26 | { 27 | "Records": [ 28 | { 29 | "s3": { 30 | "bucket": { 31 | "name": "your-source-bucket-name", 32 | "arn": "arn:aws:s3:::your-source-bucket-name" 33 | }, 34 | "object": { 35 | "key": "//" 36 | } 37 | } 38 | } 39 | ] 40 | } 41 | ``` 42 | -------------------------------------------------------------------------------- /deploy-aws-lambda/arn-layers/README.md: -------------------------------------------------------------------------------- 1 | # Build ARN layers for multiple regions 2 | 1.
Install [Docker Desktop for Windows](https://hub.docker.com/editions/community/docker-ce-desktop-windows) 3 | 2. Open the Docker Desktop and wait until it's running 4 | 3. Open your command prompt and run `docker pull public.ecr.aws/sam/build-python3.9` 5 | 4. Install the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) 6 | 5. Cofigure your AWS CLI via `aws configure --profile [your_profile_name]`, providing your admin credentials and output type `json` 7 | 6. In Docker go to 'Settings/Resources/File Sharing', then add your new folder 8 | 7. Update the information in `python build_layers.py` to fit your requirements (note that you can only create layers for regions that are accessible via your AWS account) 9 | 8. Once updated, start by running the script with `run_req_build = True` to create the dependencies via Docker and the script 10 | 9. Next, zip the resulting `python/` folder into a zip file named accordingly to match the name in the script 11 | 10. Set `run_req_build = False` and run the script again to start publishing the layers to the regions you've specified -------------------------------------------------------------------------------- /deploy-aws-lambda/arn-layers/build_layers.bat: -------------------------------------------------------------------------------- 1 | cd /d "%~dp0" & python build_layers.py & pause -------------------------------------------------------------------------------- /deploy-aws-lambda/arn-layers/build_layers.py: -------------------------------------------------------------------------------- 1 | # about: This script lets you build ARN layers based on the requirements.txt next to the script - see the separate README guide 2 | 3 | # requires Docker to be running 4 | import os, json, sys 5 | import subprocess 6 | 7 | # specify base details and region list 8 | profile_name = "admin_2022" 9 | layer_name = "canedge-influxdb-writer-v3" 10 | zip_name = "canedge-influxdb-writer.zip" 11 | layer_description = "CSS Electronics canedge-influxdb-writer dependencies for use in AWS Lambda functions" 12 | csv_path = "lambda_layer_arns.csv" 13 | run_req_build = False 14 | update_csv_file = True 15 | 16 | # not included in the prebuilt list : 17 | # "cn-north-1", 18 | # "cn-northwest-1", 19 | # "eu-central-2", 20 | # "eu-south-1", 21 | # "eu-south-2", 22 | # "me-south-1", 23 | # "me-central-1", 24 | # "us-gov-east-1", 25 | # "us-gov-west-1", 26 | # "ap-east-1" 27 | 28 | regions = [ 29 | "ap-northeast-1", 30 | "ap-northeast-2", 31 | "ap-northeast-3", 32 | "ap-south-1", 33 | "ap-southeast-1", 34 | "ap-southeast-2", 35 | "ca-central-1", 36 | "eu-central-1", 37 | "eu-north-1", 38 | "eu-west-1", 39 | "eu-west-2", 40 | "eu-west-3", 41 | "sa-east-1", 42 | "us-east-1", 43 | "us-east-2", 44 | "us-west-1", 45 | "us-west-2", 46 | ] 47 | 48 | # ----------------------------------------------- 49 | # create zip with requirements.txt dependencies 50 | if run_req_build: 51 | os.system("rmdir /S/Q python") 52 | os.system("mkdir python\lib\python3.9\site-packages") 53 | os.system( 54 | 'docker run -v "%cd%":/var/task "public.ecr.aws/sam/build-python3.9" /bin/sh -c "pip install -r requirements.txt -t python/lib/python3.9/site-packages/; exit"' 55 | ) 56 | os.system("rmdir /S/Q python\\lib\\python3.9\\site-packages\\botocore") 57 | print( 58 | f"\nCompleted building dependencies, now exiting script.\nNext, manually zip the python/ folder to '{zip_name}', then set run_req_build = False and run the script again" 59 | ) 60 | sys.exit() 61 | 62 | # 
----------------------------------------------- 63 | # for each region, publish AWS layer with build zip 64 | region_arn_list = [] 65 | 66 | for region in regions: 67 | print(f"\n------------------------------\nExecuting region {region}") 68 | 69 | # create the layers 70 | arn_output = subprocess.check_output( 71 | f'aws --profile admin_2022 lambda publish-layer-version --region {region} --layer-name {layer_name} --description "{layer_description}" --cli-connect-timeout 6000 --license-info "MIT" --zip-file "fileb://{zip_name}" --compatible-runtimes python3.9', 72 | shell=True, 73 | ).decode("utf-8") 74 | 75 | version = int(json.loads(arn_output)["Version"]) 76 | 77 | # make them public 78 | make_public = subprocess.check_output( 79 | f"aws --profile {profile_name} lambda add-layer-version-permission --layer-name {layer_name} --version-number {version} --statement-id allAccountsExample2 --principal * --action lambda:GetLayerVersion --region {region}", 80 | shell=True, 81 | ) 82 | 83 | print("Build layer:", arn_output) 84 | print("Make public:", make_public) 85 | 86 | arn = str(json.loads(arn_output)["LayerVersionArn"]) 87 | region_arn = f"{region},{arn}\n" 88 | region_arn_list.append(region_arn) 89 | 90 | # write data to CSV 91 | if update_csv_file: 92 | output_file = open(csv_path, "w") 93 | for region_arn in region_arn_list: 94 | output_file.write(region_arn) 95 | 96 | output_file.close() 97 | 98 | print( 99 | f"Completed writing {len(region_arn_list)} out of {len(regions)} to CSV {csv_path}" 100 | ) 101 | -------------------------------------------------------------------------------- /deploy-aws-lambda/arn-layers/lambda_layer_arns.csv: -------------------------------------------------------------------------------- 1 | ap-northeast-1,arn:aws:lambda:ap-northeast-1:319723967016:layer:canedge-influxdb-writer-v3:1 2 | ap-northeast-2,arn:aws:lambda:ap-northeast-2:319723967016:layer:canedge-influxdb-writer-v3:1 3 | ap-northeast-3,arn:aws:lambda:ap-northeast-3:319723967016:layer:canedge-influxdb-writer-v3:1 4 | ap-south-1,arn:aws:lambda:ap-south-1:319723967016:layer:canedge-influxdb-writer-v3:1 5 | ap-southeast-1,arn:aws:lambda:ap-southeast-1:319723967016:layer:canedge-influxdb-writer-v3:1 6 | ap-southeast-2,arn:aws:lambda:ap-southeast-2:319723967016:layer:canedge-influxdb-writer-v3:1 7 | ca-central-1,arn:aws:lambda:ca-central-1:319723967016:layer:canedge-influxdb-writer-v3:1 8 | eu-central-1,arn:aws:lambda:eu-central-1:319723967016:layer:canedge-influxdb-writer-v3:1 9 | eu-north-1,arn:aws:lambda:eu-north-1:319723967016:layer:canedge-influxdb-writer-v3:1 10 | eu-west-1,arn:aws:lambda:eu-west-1:319723967016:layer:canedge-influxdb-writer-v3:1 11 | eu-west-2,arn:aws:lambda:eu-west-2:319723967016:layer:canedge-influxdb-writer-v3:1 12 | eu-west-3,arn:aws:lambda:eu-west-3:319723967016:layer:canedge-influxdb-writer-v3:1 13 | sa-east-1,arn:aws:lambda:sa-east-1:319723967016:layer:canedge-influxdb-writer-v3:1 14 | us-east-1,arn:aws:lambda:us-east-1:319723967016:layer:canedge-influxdb-writer-v3:1 15 | us-east-2,arn:aws:lambda:us-east-2:319723967016:layer:canedge-influxdb-writer-v3:1 16 | us-west-1,arn:aws:lambda:us-west-1:319723967016:layer:canedge-influxdb-writer-v3:1 17 | us-west-2,arn:aws:lambda:us-west-2:319723967016:layer:canedge-influxdb-writer-v3:1 18 | -------------------------------------------------------------------------------- /deploy-aws-lambda/arn-layers/requirements.txt: -------------------------------------------------------------------------------- 1 | aiobotocore==2.5.0 2 | 
aiohttp==3.8.1 3 | aioitertools==0.10.0 4 | aiosignal==1.2.0 5 | async-timeout==4.0.2 6 | attrs==21.4.0 7 | botocore==1.29.76 8 | can_decoder>=0.1.9 9 | canedge-browser>=0.0.8 10 | canmatrix==0.9.5 11 | certifi==2021.10.8 12 | charset-normalizer==2.0.12 13 | click==8.1.2 14 | colorama==0.4.4 15 | frozenlist==1.3.0 16 | fsspec==2023.4.0 17 | future==0.18.3 18 | idna==3.4 19 | influxdb-client==1.35.0 20 | J1939-PGN==0.4 21 | jmespath==0.10.0 22 | mdf-iter>=2.1.1 23 | multidict==6.0.2 24 | numpy==1.24.1 25 | pandas==1.5.3 26 | python-dateutil==2.8.2 27 | pytz==2022.1 28 | reactivex==4.0.4 29 | s3fs==2023.4.0 30 | six==1.16.0 31 | typing_extensions==4.1.1 32 | urllib3==1.26.9 33 | wrapt==1.14.0 34 | yarl==1.7.2 35 | -------------------------------------------------------------------------------- /deploy-aws-lambda/delete_aws_lambda.bat: -------------------------------------------------------------------------------- 1 | env\Scripts\activate & python delete_aws_lambda.py & pause -------------------------------------------------------------------------------- /deploy-aws-lambda/delete_aws_lambda.py: -------------------------------------------------------------------------------- 1 | import os 2 | import boto3 3 | import sys 4 | from pathlib import Path 5 | parent = Path(__file__).resolve().parent.parent 6 | sys.path.append(str(parent)) 7 | import inputs as inp 8 | 9 | 10 | # Switch to working directory of the bat file 11 | os.chdir(os.path.dirname(os.path.realpath(__file__))) 12 | 13 | # define initial variables 14 | AWS_ACCESS_KEY = inp.key 15 | AWS_SECRET_KEY = inp.secret 16 | 17 | try: 18 | REGION = inp.endpoint.split(".")[1] 19 | except: 20 | print(f"Unable to extract region from {inp.endpoint} - check if correct syntax is used ala http://s3.region.amazonaws.com") 21 | print("Exiting script") 22 | sys.exit() 23 | 24 | S3_BUCKET = inp.devices[0].split("/")[0] 25 | 26 | LAMBDA_ROLE_NAME = "canedge-influxdb-lambda-role" 27 | LAMBDA_FUNCTION_NAME = "canedge-influxdb-writer" 28 | 29 | print( 30 | "This batch script will remove the Lambda function, the Lambda execution role and S3 bucket triggers." 
31 | ) 32 | print( 33 | "- Ensure that you have added your admin AWS S3 credentials and details to the inputs.py" 34 | ) 35 | print( 36 | "- Double check that the removal was done as expected by reviewing your AWS account" 37 | ) 38 | print( 39 | "- We do not take any responsibility for issues involved in the use of this script" 40 | ) 41 | 42 | # Configure boto3 client 43 | session = boto3.Session( 44 | aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_KEY, region_name=REGION 45 | ) 46 | 47 | # Get AWS S3 bucket region 48 | s3 = session.client("s3") 49 | 50 | try: 51 | response = s3.get_bucket_location(Bucket=S3_BUCKET) 52 | REGION = response["LocationConstraint"] 53 | except Exception as e: 54 | print("\n") 55 | print(e) 56 | print("\nUnable to establish S3 connection.") 57 | print("- Please check if your S3 credentials in inputs.py are correct and provide administrative rights") 58 | print("- Please check if the 1st entry in your 'devices' list correctly includes your S3 bucket name") 59 | print("- Exiting script") 60 | sys.exit() 61 | 62 | 63 | 64 | # Get AWS account ID 65 | sts = session.client("sts") 66 | response = sts.get_caller_identity() 67 | AWS_ACCOUNT_ID = response["Account"] 68 | 69 | print( 70 | f"--------------\nConfigured AWS boto3 client and extracted S3 bucket region {REGION} and account ID {AWS_ACCOUNT_ID}" 71 | ) 72 | 73 | # delete the lambda role if it exists 74 | LAMBDA_ROLE_ARN = f"arn:aws:iam::{AWS_ACCOUNT_ID}:role/{LAMBDA_ROLE_NAME}" 75 | 76 | iam = session.client("iam") 77 | 78 | try: 79 | iam.detach_role_policy( 80 | RoleName=LAMBDA_ROLE_NAME, 81 | PolicyArn="arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", 82 | ) 83 | iam.detach_role_policy( 84 | RoleName=LAMBDA_ROLE_NAME, 85 | PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess", 86 | ) 87 | iam.delete_role(RoleName=LAMBDA_ROLE_NAME) 88 | print(f"--------------\nIAM role {LAMBDA_ROLE_NAME} deleted") 89 | except iam.exceptions.NoSuchEntityException: 90 | print(f"--------------\nIAM role {LAMBDA_ROLE_NAME} does not exist") 91 | 92 | 93 | # Delete the lambda function if it exist 94 | lambda_client = session.client("lambda") 95 | try: 96 | lambda_client.delete_function(FunctionName=LAMBDA_FUNCTION_NAME) 97 | print( 98 | f"--------------\nDeleted the AWS Lambda function {LAMBDA_FUNCTION_NAME} in region {REGION}" 99 | ) 100 | except Exception as e: 101 | print("--------------\nUnable to delete lambda function") 102 | print(e) 103 | 104 | # Delete the S3 triggers by writing blank notification config 105 | notification_config = dict({}) 106 | 107 | try: 108 | s3.put_bucket_notification_configuration( 109 | Bucket=S3_BUCKET, NotificationConfiguration=notification_config 110 | ) 111 | print(f"--------------\nRemoved all event triggers from S3 bucket {S3_BUCKET}") 112 | except Exception as e: 113 | print(e) 114 | 115 | print("\nDeletion completed.") -------------------------------------------------------------------------------- /deploy-aws-lambda/deploy_aws_lambda.bat: -------------------------------------------------------------------------------- 1 | env\Scripts\activate & python deploy_aws_lambda.py & pause -------------------------------------------------------------------------------- /deploy-aws-lambda/deploy_aws_lambda.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import boto3 4 | import shutil 5 | import tempfile 6 | import datetime 7 | import sys 8 | import time 9 | 10 | from pathlib import Path 11 | 
parent = Path(__file__).resolve().parent.parent 12 | sys.path.append(str(parent)) 13 | import inputs as inp 14 | 15 | # Switch to working directory of the bat file 16 | os.chdir(os.path.dirname(os.path.realpath(__file__))) 17 | 18 | # define initial variables 19 | AWS_ACCESS_KEY = inp.key 20 | AWS_SECRET_KEY = inp.secret 21 | 22 | try: 23 | REGION = inp.endpoint.split(".")[1] 24 | except: 25 | print(f"Unable to extract region from {inp.endpoint} - check if correct syntax is used ala http://s3.region.amazonaws.com") 26 | print("Exiting script") 27 | sys.exit() 28 | 29 | S3_BUCKET = inp.devices[0].split("/")[0] 30 | LAMBDA_ROLE_NAME = "canedge-influxdb-lambda-role" 31 | LAMBDA_FUNCTION_NAME = "canedge-influxdb-writer" 32 | 33 | PYTHON_BUILD = "python3.9" 34 | LAMBDA_HANDLER = "lambda_function.lambda_handler" 35 | LAMBDA_ZIP_FILE = "lambda_function" 36 | files_to_archive = ["lambda_function.py", "../inputs.py", "../utils.py","../utils_db.py", "../dbc_files"] 37 | ARN_LAYER_CSV = "arn-layers/lambda_layer_arns.csv" 38 | 39 | 40 | print("This script will deploy an AWS Lambda function in your AWS account.") 41 | print( 42 | "It will also add S3 triggers for MF4/MFC/MFE/MFM files and the relevant roles/permissions." 43 | ) 44 | print( 45 | "- Ensure that you've updated inputs.py with your details and tested this locally" 46 | ) 47 | print("- Ensure that the S3 credentials in your inputs.py have admin permissions") 48 | print( 49 | "- If you have previously deployed a Lambda function, use the deletion script to remove it" 50 | ) 51 | 52 | # Configure boto3 client 53 | session = boto3.Session( 54 | aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_KEY, region_name=REGION 55 | ) 56 | 57 | # Get AWS S3 bucket region 58 | s3 = session.client("s3") 59 | 60 | try: 61 | response = s3.get_bucket_location(Bucket=S3_BUCKET) 62 | REGION_BUCKET = response["LocationConstraint"] 63 | except Exception as e: 64 | print("\n") 65 | print(e) 66 | print("\nUnable to establish S3 connection.") 67 | print("- Please check if your S3 credentials in inputs.py are correct and provide administrative rights") 68 | print("- Please check if the 1st entry in your 'devices' list correctly includes your S3 bucket name") 69 | print("- Exiting script") 70 | sys.exit() 71 | 72 | if REGION_BUCKET != REGION: 73 | print(f"WARNING: Bucket region is {REGION_BUCKET} and differs from session region {REGION} - please review.") 74 | print("- Exiting script") 75 | 76 | sys.exit() 77 | 78 | 79 | 80 | # Get AWS account ID 81 | sts = session.client("sts") 82 | response = sts.get_caller_identity() 83 | AWS_ACCOUNT_ID = response["Account"] 84 | 85 | print( 86 | f"--------------\nConfigured AWS boto3 client and account ID {AWS_ACCOUNT_ID}" 87 | ) 88 | 89 | temp_dir = tempfile.mkdtemp() 90 | 91 | for f in files_to_archive[:-1]: 92 | shutil.copy2(f, temp_dir) 93 | 94 | shutil.copytree( 95 | files_to_archive[-1], f"{temp_dir}/{os.path.basename(files_to_archive[-1])}" 96 | ) 97 | 98 | shutil.make_archive(LAMBDA_ZIP_FILE, "zip", root_dir=temp_dir) 99 | 100 | print(f"--------------\nZipped relevant files into {LAMBDA_ZIP_FILE}.zip") 101 | 102 | # Look up the relevant pre-built Lambda ARN layer from the CSV lambda_layer_arns.csv based on region 103 | with open(ARN_LAYER_CSV, "r") as f: 104 | lines = f.readlines() 105 | for line in lines: 106 | if REGION in line: 107 | LAMBDA_ARN_LAYER = line.split(",")[1].replace("\n", "") 108 | 109 | print(f"--------------\nUsed {REGION} to lookup Lambda ARN layer {LAMBDA_ARN_LAYER}") 110 | 111 | # Create 
the lambda role if it does not already exist 112 | LAMBDA_ROLE_ARN = f"arn:aws:iam::{AWS_ACCOUNT_ID}:role/{LAMBDA_ROLE_NAME}" 113 | 114 | iam = session.client("iam") 115 | trust_policy = json.dumps( 116 | { 117 | "Version": "2012-10-17", 118 | "Statement": [ 119 | { 120 | "Effect": "Allow", 121 | "Principal": {"Service": "lambda.amazonaws.com"}, 122 | "Action": "sts:AssumeRole", 123 | } 124 | ], 125 | } 126 | ) 127 | 128 | 129 | try: 130 | role = iam.get_role(RoleName=LAMBDA_ROLE_NAME) 131 | print(f"--------------\nIAM role {LAMBDA_ROLE_NAME} already exists") 132 | except iam.exceptions.NoSuchEntityException: 133 | # with open("trust_policy.json", "r") as f: 134 | # trust_policy = f.read() 135 | iam.create_role(RoleName=LAMBDA_ROLE_NAME, AssumeRolePolicyDocument=trust_policy) 136 | print( 137 | f"--------------\nCreated new AWS lambda role {LAMBDA_ROLE_NAME} with ARN {LAMBDA_ROLE_ARN}" 138 | ) 139 | print("Waiting 10 seconds ...") 140 | time.sleep(10) 141 | iam.attach_role_policy( 142 | RoleName=LAMBDA_ROLE_NAME, 143 | PolicyArn="arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", 144 | ) 145 | print( 146 | f"--------------\nAttached role policy AWSLambdaBasicExecutionRole" 147 | ) 148 | print("Waiting 10 seconds ...") 149 | time.sleep(10) 150 | iam.attach_role_policy( 151 | RoleName=LAMBDA_ROLE_NAME, 152 | PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess", 153 | ) 154 | print( 155 | f"--------------\nAttached role policy AmazonS3FullAccess" 156 | ) 157 | print("Waiting 20 seconds ...") 158 | time.sleep(20) 159 | 160 | # Create the lambda function if it does not already exist 161 | lambda_client = session.client("lambda") 162 | try: 163 | lambda_client.create_function( 164 | FunctionName=LAMBDA_FUNCTION_NAME, 165 | Runtime=PYTHON_BUILD, 166 | Role=LAMBDA_ROLE_ARN, 167 | Handler=LAMBDA_HANDLER, 168 | Code={"ZipFile": open(f"{LAMBDA_ZIP_FILE}.zip", "rb").read()}, 169 | Timeout=180, 170 | MemorySize=1024, 171 | Layers=[LAMBDA_ARN_LAYER], 172 | ) 173 | print( 174 | f"--------------\nDeployed the AWS Lambda function {LAMBDA_FUNCTION_NAME} in region {REGION}" 175 | ) 176 | print("Waiting 10 seconds ...") 177 | time.sleep(10) 178 | except Exception as e: 179 | print("--------------\nUnable to create lambda function") 180 | print(e) 181 | 182 | 183 | # Create unique statement ID via timestamp and add S3 permission to trigger Lambda 184 | dt = datetime.datetime.now().strftime("%Y%m%d%H%M%S") 185 | statement_id = f"canedges3event{dt}" 186 | 187 | try: 188 | lambda_client.add_permission( 189 | FunctionName=LAMBDA_FUNCTION_NAME, 190 | StatementId=statement_id, 191 | Action="lambda:InvokeFunction", 192 | Principal="s3.amazonaws.com", 193 | SourceArn=f"arn:aws:s3:::{S3_BUCKET}", 194 | SourceAccount=AWS_ACCOUNT_ID, 195 | ) 196 | print( 197 | f"--------------\nAdded S3 InvokeFunction permissions for the Lambda execution role using the statement ID {statement_id}" 198 | ) 199 | print("Waiting 10 seconds ...") 200 | time.sleep(10) 201 | except Exception as e: 202 | print( 203 | f"--------------\nWARNING: Failed to add S3 InvokeFunction permissions for the Lambda execution role using the statement ID {statement_id}" 204 | ) 205 | print(e) 206 | 207 | # Add S3 event triggers for MF4/MFC/MFE/MFM suffixes 208 | notification_config = dict( 209 | { 210 | "LambdaFunctionConfigurations": [ 211 | { 212 | "Id": "MF4_TRIGGER", 213 | "LambdaFunctionArn": f"arn:aws:lambda:{REGION}:{AWS_ACCOUNT_ID}:function:{LAMBDA_FUNCTION_NAME}", 214 | "Events": ["s3:ObjectCreated:*"], 215 | "Filter": { 216 | "Key": 
{"FilterRules": [{"Name": "suffix", "Value": ".MF4"}]} 217 | }, 218 | }, 219 | { 220 | "Id": "MFC_TRIGGER", 221 | "LambdaFunctionArn": f"arn:aws:lambda:{REGION}:{AWS_ACCOUNT_ID}:function:{LAMBDA_FUNCTION_NAME}", 222 | "Events": ["s3:ObjectCreated:*"], 223 | "Filter": { 224 | "Key": {"FilterRules": [{"Name": "suffix", "Value": ".MFC"}]} 225 | }, 226 | }, 227 | { 228 | "Id": "MFE_TRIGGER", 229 | "LambdaFunctionArn": f"arn:aws:lambda:{REGION}:{AWS_ACCOUNT_ID}:function:{LAMBDA_FUNCTION_NAME}", 230 | "Events": ["s3:ObjectCreated:*"], 231 | "Filter": { 232 | "Key": {"FilterRules": [{"Name": "suffix", "Value": ".MFE"}]} 233 | }, 234 | }, 235 | { 236 | "Id": "MFM_TRIGGER", 237 | "LambdaFunctionArn": f"arn:aws:lambda:{REGION}:{AWS_ACCOUNT_ID}:function:{LAMBDA_FUNCTION_NAME}", 238 | "Events": ["s3:ObjectCreated:*"], 239 | "Filter": { 240 | "Key": {"FilterRules": [{"Name": "suffix", "Value": ".MFM"}]} 241 | }, 242 | }, 243 | ] 244 | } 245 | ) 246 | 247 | 248 | try: 249 | s3.put_bucket_notification_configuration( 250 | Bucket=S3_BUCKET, NotificationConfiguration=notification_config 251 | ) 252 | print(f"--------------\nAdded the S3 bucket {S3_BUCKET} as trigger source") 253 | except Exception as e: 254 | print(e) 255 | 256 | # clean up 257 | if os.path.exists(f"{LAMBDA_ZIP_FILE}.zip"): 258 | os.remove(f"{LAMBDA_ZIP_FILE}.zip") 259 | print(f"--------------\nClean up: {LAMBDA_ZIP_FILE}.zip has been deleted.") 260 | else: 261 | print(f"--------------\nClean up: {LAMBDA_ZIP_FILE}.zip was not deleted as it does not exist.") 262 | 263 | print(f"--------------\nComplete: Test your Lambda function by uploading a log file to S3.") 264 | -------------------------------------------------------------------------------- /deploy-aws-lambda/install.bat: -------------------------------------------------------------------------------- 1 | python -m venv env & env\Scripts\activate & pip install -r requirements.txt & pause -------------------------------------------------------------------------------- /deploy-aws-lambda/lambda_function.py: -------------------------------------------------------------------------------- 1 | import s3fs 2 | from utils import load_dbc_files, ProcessData, MultiFrameDecoder, restructure_data 3 | from utils_db import SetupInflux 4 | import inputs as inp 5 | 6 | 7 | def lambda_handler(event, context=None): 8 | bucket = event["Records"][0]["s3"]["bucket"]["name"] 9 | key = event["Records"][0]["s3"]["object"]["key"] 10 | log_files = [bucket + "/" + key] 11 | 12 | fs = s3fs.S3FileSystem(anon=False) 13 | db_list = load_dbc_files(inp.dbc_paths) 14 | 15 | # initialize connection to InfluxDB 16 | influx = SetupInflux(inp.influx_url, inp.token, inp.org_id, inp.influx_bucket, inp.res) 17 | 18 | # process the log files and write extracted signals to InfluxDB 19 | proc = ProcessData(fs, db_list, inp.signals, inp.days_offset) 20 | 21 | for log_file in log_files: 22 | df_raw, device_id = proc.get_raw_data(log_file, inp.pw) 23 | 24 | if inp.tp_type != "": 25 | tp = MultiFrameDecoder(inp.tp_type) 26 | df_raw = tp.combine_tp_frames(df_raw) 27 | 28 | df_phys = proc.extract_phys(df_raw) 29 | proc.print_log_summary(device_id, log_file, df_phys) 30 | 31 | df_phys = restructure_data(df_phys,inp.res) 32 | 33 | influx.write_signals(device_id, df_phys) 34 | -------------------------------------------------------------------------------- /deploy-aws-lambda/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3==1.26.59 2 | botocore==1.29.59 3 | jmespath==1.0.1 4 | 
python-dateutil==2.8.2 5 | s3transfer==0.6.0 6 | six==1.16.0 7 | urllib3==1.26.14 8 | -------------------------------------------------------------------------------- /inputs.py: -------------------------------------------------------------------------------- 1 | # ----------------------------------------------- 2 | # specify your InfluxDB details 3 | influx_bucket = "influx_bucket_name" 4 | token = "influx_token" 5 | influx_url = "influx_endpoint" 6 | org_id = "influx_org_id" 7 | 8 | # ----------------------------------------------- 9 | # specify devices to process from local disk via ["folder/device_id"] or S3 via ["bucket/device_id"] 10 | devices = ["LOG/2F6913DB"] 11 | 12 | # ----------------------------------------------- 13 | # specify DBC paths and a list of signals to process ([]: include all signals) 14 | # optionally include signal prefixes to make CAN ID, PGN and/or BusChannel explicit 15 | dbc_paths = ["dbc_files/canmod-gps.dbc"] 16 | signals = [] 17 | can_id_prefix = False 18 | pgn_prefix = False 19 | bus_prefix = False 20 | 21 | # specify resampling frequency. Setting this to "" means no resampling (much slower) 22 | res = "5S" 23 | 24 | # ----------------------------------------------- 25 | # specify whether to load data from S3 (and add server details if relevant) 26 | s3 = False 27 | key = "s3_key" 28 | secret = "s3_secret" 29 | endpoint = "s3_endpoint" # e.g. http://s3.us-east-1.amazonaws.com or http://192.168.0.1:9000 30 | region = "s3_region" # only relevant if you are using more recent builds of MinIO S3 as the backend 31 | # cert = "path/to/cert.crt" # if MinIO + TLS, add path to cert and update utils.py/setup_fs to verify 32 | 33 | # ----------------------------------------------- 34 | # if dynamic = True, data is loaded dynamically based on most recent data in InfluxDB - else default_start is used 35 | dynamic = True 36 | default_start = "2022-01-01 00:00:00" 37 | days_offset = 1 # offsets data to start at 'today - days_offset'. 
Set to None to use original timestamps 38 | 39 | # if you're using data encryption, you can add the password below 40 | pw = {"default": "password"} 41 | 42 | # if you need to process multi-frame data, set tp_type to "uds", "j1939" or "nmea" 43 | tp_type = "" 44 | -------------------------------------------------------------------------------- /install.bat: -------------------------------------------------------------------------------- 1 | python -m venv env & env\Scripts\activate & pip install -r requirements.txt & env\Scripts\deactivate & pause -------------------------------------------------------------------------------- /main.bat: -------------------------------------------------------------------------------- 1 | env\Scripts\activate & python main.py & pause -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from utils import ( 2 | setup_fs, 3 | load_dbc_files, 4 | list_log_files, 5 | ProcessData, 6 | MultiFrameDecoder, 7 | restructure_data, 8 | add_signal_prefix 9 | ) 10 | from utils_db import SetupInflux 11 | import inputs as inp 12 | 13 | # initialize connection to InfluxDB + get latest data entries per device 14 | influx = SetupInflux(inp.influx_url, inp.token, inp.org_id, inp.influx_bucket, inp.res) 15 | start_times = influx.get_start_times(inp.devices, inp.default_start, inp.dynamic) 16 | 17 | # setup filesystem (local/S3), load DBC files and list log files for processing 18 | fs = setup_fs(inp.s3, inp.key, inp.secret, inp.endpoint, inp.region, passwords=inp.pw) 19 | db_list = load_dbc_files(inp.dbc_paths) 20 | log_files = list_log_files(fs, inp.devices, start_times, True, inp.pw) 21 | 22 | # process log files and write extracted signals to InfluxDB 23 | proc = ProcessData(fs, db_list, inp.signals, inp.days_offset) 24 | 25 | for log_file in log_files: 26 | df_raw, device_id = proc.get_raw_data(log_file, inp.pw) 27 | 28 | if inp.tp_type != "": 29 | tp = MultiFrameDecoder(inp.tp_type) 30 | df_raw = tp.combine_tp_frames(df_raw) 31 | 32 | df_phys = proc.extract_phys(df_raw) 33 | proc.print_log_summary(device_id, log_file, df_phys) 34 | 35 | df_phys = add_signal_prefix(df_phys, can_id_prefix=inp.can_id_prefix, pgn_prefix=inp.pgn_prefix, bus_prefix=inp.bus_prefix) 36 | 37 | df_phys = restructure_data(df_phys,inp.res) 38 | influx.write_signals(device_id, df_phys) 39 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiobotocore==2.5.0 2 | aiohttp==3.8.1 3 | aioitertools==0.10.0 4 | aiosignal==1.2.0 5 | async-timeout==4.0.2 6 | attrs==21.4.0 7 | botocore==1.29.76 8 | can_decoder>=0.1.9 9 | canedge-browser>=0.0.8 10 | canmatrix==0.9.5 11 | certifi==2021.10.8 12 | charset-normalizer==2.0.12 13 | click==8.1.2 14 | colorama==0.4.4 15 | frozenlist==1.3.0 16 | fsspec==2023.4.0 17 | future==0.18.3 18 | idna==3.4 19 | influxdb-client==1.35.0 20 | J1939-PGN==0.4 21 | jmespath==0.10.0 22 | mdf-iter>=2.1.1 23 | multidict==6.0.2 24 | numpy==1.24.1 25 | pandas==1.5.3 26 | python-dateutil==2.8.2 27 | pytz==2022.1 28 | reactivex==4.0.4 29 | s3fs==2023.4.0 30 | six==1.16.0 31 | typing_extensions==4.1.1 32 | urllib3==1.26.9 33 | wrapt==1.14.0 34 | yarl==1.7.2 35 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | def 
setup_fs(s3, key="", secret="", endpoint="", region="",cert="", passwords={}): 2 | """Given a boolean specifying whether to use local disk or S3, set up the filesystem 3 | Syntax examples: AWS (http://s3.us-east-2.amazonaws.com), MinIO (http://192.168.0.1:9000) 4 | The cert input is relevant if you're using MinIO with TLS enabled, for specifying the path to the certificate. 5 | For MinIO you should also pass the region_name 6 | 7 | The block_size is set to accommodate files up to 55 MB in size. If your log files are larger, adjust this value accordingly 8 | """ 9 | 10 | if s3: 11 | import s3fs 12 | 13 | block_size = 55 * 1024 * 1024 14 | 15 | if "amazonaws" in endpoint: 16 | fs = s3fs.S3FileSystem(key=key, secret=secret, default_block_size=block_size) 17 | elif cert != "": 18 | fs = s3fs.S3FileSystem( 19 | key=key, 20 | secret=secret, 21 | client_kwargs={"endpoint_url": endpoint, "verify": cert, "region_name": region}, 22 | default_block_size=block_size, 23 | ) 24 | else: 25 | fs = s3fs.S3FileSystem( 26 | key=key, 27 | secret=secret, 28 | client_kwargs={"endpoint_url": endpoint, "region_name": region}, 29 | default_block_size=block_size, 30 | ) 31 | 32 | else: 33 | from pathlib import Path 34 | import canedge_browser 35 | 36 | base_path = Path(__file__).parent 37 | fs = canedge_browser.LocalFileSystem(base_path=base_path, passwords=passwords) 38 | 39 | return fs 40 | 41 | 42 | # ----------------------------------------------- 43 | def load_dbc_files(dbc_paths): 44 | """Given a list of DBC file paths, create a list of conversion rule databases""" 45 | import can_decoder 46 | from pathlib import Path 47 | 48 | db_list = [] 49 | for dbc in dbc_paths: 50 | db = can_decoder.load_dbc(Path(__file__).parent / dbc) 51 | db_list.append(db) 52 | 53 | return db_list 54 | 55 | 56 | # ----------------------------------------------- 57 | def list_log_files(fs, devices, start_times, verbose=True, passwords={}): 58 | """Given a list of device paths, list log files from specified filesystem. 59 | Data is loaded based on the list of start datetimes 60 | """ 61 | import canedge_browser 62 | 63 | log_files = [] 64 | 65 | if len(start_times): 66 | for idx, device in enumerate(devices): 67 | start = start_times[idx] 68 | log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords) 69 | log_files.extend(log_files_device) 70 | 71 | if verbose: 72 | print(f"Found {len(log_files)} log files\n") 73 | 74 | return log_files 75 | 76 | def add_signal_prefix(df_phys, can_id_prefix=False, pgn_prefix=False, bus_prefix=False): 77 | """Rename Signal names by prefixing the bus channel, full 78 | CAN ID (in hex) and/or J1939 PGN 79 | """ 80 | from J1939_PGN import J1939_PGN 81 | 82 | if df_phys.empty: 83 | return df_phys 84 | else: 85 | prefix = "" 86 | if bus_prefix: 87 | prefix += df_phys["BusChannel"].apply(lambda x: f"{x}.") 88 | if can_id_prefix: 89 | prefix += df_phys["CAN ID"].apply(lambda x: f"{hex(int(x))[2:].upper()}."
) 90 | if pgn_prefix: 91 | prefix += df_phys["CAN ID"].apply(lambda x: f"{J1939_PGN(int(x)).pgn}.") 92 | 93 | df_phys["Signal"] = prefix + df_phys["Signal"] 94 | 95 | return df_phys 96 | 97 | def restructure_data(df_phys, res, ffill=False): 98 | """Restructure the decoded data to a resampled 99 | format where each column reflects a Signal 100 | """ 101 | import pandas as pd 102 | 103 | if not df_phys.empty and res != "": 104 | df_phys = df_phys.pivot_table(values="Physical Value", index=pd.Grouper(freq=res), columns="Signal") 105 | 106 | if ffill: 107 | df_phys = df_phys.ffill() 108 | 109 | return df_phys 110 | 111 | 112 | def test_signal_threshold(df_phys, signal, threshold): 113 | """Illustrative example for how to extract a signal and evaluate statistical values 114 | vs. defined thresholds. The function can be easily modified for your needs. 115 | """ 116 | df_signal = df_phys[df_phys["Signal"] == signal]["Physical Value"] 117 | 118 | stats = df_signal.agg(["count", "min", "max", "mean", "std"]) 119 | delta = stats["max"] - stats["min"] 120 | 121 | if delta > threshold: 122 | print(f"{signal} exhibits a 'max - min' delta of {delta} exceeding threshold of {threshold}") 123 | 124 | 125 | def add_custom_sig(df_phys, signal1, signal2, function, new_signal): 126 | """Helper function for calculating a new signal based on two signals and a function. 127 | Returns a dataframe with the new signal name and physical values 128 | """ 129 | import pandas as pd 130 | 131 | try: 132 | s1 = df_phys[df_phys["Signal"] == signal1]["Physical Value"].rename(signal1) 133 | s2 = df_phys[df_phys["Signal"] == signal2]["Physical Value"].rename(signal2) 134 | 135 | df_new_sig = pd.merge_ordered( 136 | s1, 137 | s2, 138 | on="TimeStamp", 139 | fill_method="ffill", 140 | ).set_index("TimeStamp") 141 | df_new_sig = df_new_sig.apply(lambda x: function(x[0], x[1]), axis=1).dropna().rename("Physical Value").to_frame() 142 | df_new_sig["Signal"] = new_signal 143 | df_phys = df_phys.append(df_new_sig) 144 | 145 | except: 146 | print(f"Warning: Custom signal {new_signal} not created\n") 147 | 148 | return df_phys 149 | 150 | 151 | # ----------------------------------------------- 152 | class ProcessData: 153 | def __init__(self, fs, db_list, signals=[], days_offset=None, verbose=True): 154 | from datetime import datetime, timedelta 155 | 156 | self.db_list = db_list 157 | self.signals = signals 158 | self.fs = fs 159 | self.days_offset = days_offset 160 | self.verbose = verbose 161 | 162 | if self.verbose == True and self.days_offset != None: 163 | date_offset = (datetime.today() - timedelta(days=self.days_offset)).strftime("%Y-%m-%d") 164 | print( 165 | f"Warning: days_offset = {self.days_offset}, meaning data is offset to start at {date_offset}.\nThis is intended for sample data testing only. Set days_offset = None when processing your own data." 
166 | ) 167 | 168 | return 169 | 170 | def extract_phys(self, df_raw): 171 | """Given df of raw data and list of decoding databases, create a new df with 172 | physical values (no duplicate signals and optionally filtered/rebaselined) 173 | """ 174 | import can_decoder 175 | import pandas as pd 176 | 177 | df_phys = pd.DataFrame() 178 | df_phys_temp = [] 179 | for db in self.db_list: 180 | df_decoder = can_decoder.DataFrameDecoder(db) 181 | 182 | for bus, bus_group in df_raw.groupby("BusChannel"): 183 | for length, group in bus_group.groupby("DataLength"): 184 | df_phys_group = df_decoder.decode_frame(group) 185 | if not df_phys_group.empty: 186 | df_phys_group["BusChannel"] = bus 187 | df_phys_temp.append(df_phys_group) 188 | 189 | df_phys = pd.concat(df_phys_temp, ignore_index=False).sort_index() 190 | 191 | # remove duplicates in case multiple DBC files contain identical signals 192 | df_phys["datetime"] = df_phys.index 193 | df_phys = df_phys.drop_duplicates(keep="first") 194 | df_phys = df_phys.drop(labels="datetime", axis=1) 195 | 196 | # optionally filter and rebaseline the data 197 | df_phys = self.filter_signals(df_phys) 198 | 199 | if not df_phys.empty and type(self.days_offset) == int: 200 | df_phys = self.rebaseline_data(df_phys) 201 | 202 | return df_phys 203 | 204 | def rebaseline_data(self, df_phys): 205 | """Given a df of physical values, this offsets the timestamp 206 | to be equal to today, minus a given number of days. 207 | """ 208 | from datetime import datetime, timezone 209 | import pandas as pd 210 | 211 | delta_days = (datetime.now(timezone.utc) - df_phys.index.min()).days - self.days_offset 212 | df_phys.index = df_phys.index + pd.Timedelta(delta_days, "day") 213 | 214 | return df_phys 215 | 216 | def filter_signals(self, df_phys): 217 | """Given a df of physical values, return only signals matched by filter""" 218 | if not df_phys.empty and len(self.signals): 219 | df_phys = df_phys[df_phys["Signal"].isin(self.signals)] 220 | 221 | return df_phys 222 | 223 | def get_raw_data(self, log_file, passwords={},lin=False): 224 | """Extract a df of raw data and device ID from log file. 225 | Optionally include LIN bus data by setting lin=True 226 | """ 227 | import mdf_iter 228 | 229 | with self.fs.open(log_file, "rb") as handle: 230 | mdf_file = mdf_iter.MdfFile(handle, passwords=passwords) 231 | device_id = self.get_device_id(mdf_file) 232 | 233 | if lin: 234 | df_raw_lin = mdf_file.get_data_frame_lin() 235 | df_raw_lin["IDE"] = 0 236 | df_raw_can = mdf_file.get_data_frame() 237 | df_raw = df_raw_can.append(df_raw_lin) 238 | else: 239 | df_raw = mdf_file.get_data_frame() 240 | 241 | return df_raw, device_id 242 | 243 | def get_device_id(self, mdf_file): 244 | return mdf_file.get_metadata()["HDcomment.Device Information.serial number"]["value_raw"] 245 | 246 | def print_log_summary(self, device_id, log_file, df_phys): 247 | """Print summary information for each log file""" 248 | if self.verbose: 249 | print( 250 | "\n---------------", 251 | f"\nDevice: {device_id} | Log file: {log_file.split(device_id)[-1]} [Extracted {len(df_phys)} decoded frames]\nPeriod: {df_phys.index.min()} - {df_phys.index.max()}\n", 252 | ) 253 | 254 | 255 | # ----------------------------------------------- 256 | class MultiFrameDecoder: 257 | 258 | """Class for handling transport protocol data. For each response ID, identify 259 | sequences of subsequent frames and combine the relevant parts of the data payloads 260 | into a single payload with the relevant CAN ID.
The original raw dataframe is 261 | then cleansed of the original response ID sequence frames. Instead, the new reassembled 262 | frames are inserted. 263 | 264 | :param tp_type: the class supports UDS ("uds"), NMEA 2000 Fast Packets ("nmea") and J1939 ("j1939") 265 | :param df_raw: dataframe of raw CAN data from the mdf_iter module 266 | 267 | SINGLE_FRAME_MASK: mask used in matching single frames 268 | FIRST_FRAME_MASK: mask used in matching first frames 269 | CONSEQ_FRAME_MASK: mask used in matching consequtive frames 270 | SINGLE_FRAME: frame type reflecting a single frame response 271 | FIRST_FRAME: frame type reflecting the first frame in a multi frame response 272 | CONSEQ_FRAME: frame type reflecting a consequtive frame in a multi frame response 273 | ff_payload_start: the combined payload will start at this byte in the FIRST_FRAME 274 | bam_pgn: this is used in J1939 and marks the initial BAM message ID in DEC 275 | res_id_list: TP 'response CAN IDs' to process 276 | 277 | """ 278 | FRAME_STRUCT = { 279 | "": {}, 280 | "uds": { 281 | "SINGLE_FRAME_MASK": 0xF0, 282 | "FIRST_FRAME_MASK": 0xF0, 283 | "CONSEQ_FRAME_MASK": 0xF0, 284 | "SINGLE_FRAME": 0x00, 285 | "FIRST_FRAME": 0x10, 286 | "CONSEQ_FRAME": 0x20, 287 | "ff_payload_start": 1, 288 | "bam_pgn": -1, 289 | "res_id_list": [1960, 2016, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2026, 1979, 1992, 1998, 2001, 402522235], 290 | "group": "ID" 291 | }, 292 | "j1939": { 293 | "SINGLE_FRAME_MASK": 0xFF, 294 | "FIRST_FRAME_MASK": 0xFF, 295 | "CONSEQ_FRAME_MASK": 0x00, 296 | "SINGLE_FRAME": 0xFF, 297 | "FIRST_FRAME": 0x20, 298 | "CONSEQ_FRAME": 0x00, 299 | "ff_payload_start": 8, 300 | "bam_pgn": 60416, 301 | "res_id_list": [60416, 60160], 302 | "group": "SA" 303 | }, 304 | "nmea": { 305 | "SINGLE_FRAME_MASK": 0xFF, 306 | "FIRST_FRAME_MASK": 0x1F, 307 | "CONSEQ_FRAME_MASK": 0x00, 308 | "SINGLE_FRAME": 0xFF, 309 | "FIRST_FRAME": 0x00, 310 | "CONSEQ_FRAME": 0x00, 311 | "ff_payload_start": 2, 312 | "bam_pgn": -1, 313 | "res_id_list":[126983, 126984, 126985, 126986, 126987, 126988, 126996, 127233, 127237, 127489, 127496, 127497, 127503, 127504, 127506, 127751, 128275, 128520, 128538, 129029, 129038, 129039, 129040, 129041, 129044, 129284, 129285, 129301, 129302, 129538, 129540, 129541, 129542, 129545, 129547, 129549, 129551, 129556, 129792, 129793, 129794, 129795, 129796, 129798, 129799, 129800, 129801, 129803, 129804, 129805, 129806, 129807, 129808, 129809, 129810, 129811, 129812, 129813, 129814, 129815, 129816, 130052, 130053, 130054, 130060, 130061, 130064, 130065, 130067, 130068, 130069, 130070, 130071, 130072, 130073, 130074, 130320, 130321, 130322, 130323, 130324, 130564, 130565, 130567, 130569, 130571, 130575, 130577, 130578, 130581, 130584, 130586], 314 | "group": "ID" 315 | }} 316 | 317 | def __init__(self, tp_type=""): 318 | self.tp_type = tp_type 319 | return 320 | 321 | def calculate_pgn(self, frame_id): 322 | pgn = (frame_id & 0x03FFFF00) >> 8 323 | pgn_f = pgn & 0xFF00 324 | if pgn_f < 0xF000: 325 | pgn &= 0xFFFFFF00 326 | return pgn 327 | 328 | def calculate_sa(self, frame_id): 329 | sa = frame_id & 0x000000FF 330 | return sa 331 | 332 | def construct_new_tp_frame(self, base_frame, payload_concatenated, can_id): 333 | new_frame = base_frame.copy() 334 | new_frame["DataBytes"] = payload_concatenated 335 | new_frame["DLC"] = 0 336 | new_frame["DataLength"] = len(payload_concatenated) 337 | if can_id: 338 | new_frame["ID"] = can_id 339 | return new_frame 340 | 341 | def identify_matching_ids(self,df_raw,res_id_list_full, bam_pgn): 
342 | # identify which CAN IDs (or PGNs) match the TP IDs and create a filtered df_raw_match 343 | # which is used to separate the df_raw into two parts: Incl/excl TP frames. 344 | # Also produces a reduced res_id_list that only contains relevant ID entries 345 | if self.tp_type == "nmea": 346 | df_raw_pgns = df_raw["ID"].apply(self.calculate_pgn) 347 | df_raw_match = df_raw_pgns.isin(res_id_list_full) 348 | res_id_list = df_raw_pgns[df_raw_match].drop_duplicates().values.tolist() 349 | if self.tp_type == "j1939": 350 | df_raw_pgns = df_raw["ID"].apply(self.calculate_pgn) 351 | df_raw_match = df_raw_pgns.isin(res_id_list_full) 352 | res_id_list = res_id_list_full.copy() 353 | res_id_list.remove(bam_pgn) 354 | if type(res_id_list) is not list: 355 | res_id_list = [res_id_list] 356 | elif self.tp_type == "uds": 357 | df_raw_pgns = None 358 | df_raw_match = df_raw["ID"].isin(res_id_list_full) 359 | res_id_list = df_raw["ID"][df_raw_match].drop_duplicates().values.tolist() 360 | 361 | df_raw_tp = df_raw[df_raw_match] 362 | df_raw_excl_tp = df_raw[~df_raw_match] 363 | 364 | if len(df_raw) - len(df_raw_tp) - len(df_raw_excl_tp): 365 | print("Warning - total rows does not equal sum of rows incl/excl transport protocol frames") 366 | 367 | return df_raw_tp, df_raw_excl_tp, res_id_list, df_raw_pgns 368 | 369 | def filter_df_raw_tp(self, df_raw_tp, df_raw_tp_pgns,res_id): 370 | # filter df_raw_tp to include only frames for the specific response ID res_id 371 | if self.tp_type == "nmea": 372 | df_raw_tp_res_id = df_raw_tp[df_raw_tp_pgns.isin([res_id])] 373 | elif self.tp_type == "j1939": 374 | df_raw_tp_res_id = df_raw_tp 375 | df_raw_tp_res_id = df_raw_tp_res_id.copy() 376 | df_raw_tp_res_id["SA"] = df_raw_tp_res_id["ID"].apply(self.calculate_sa) 377 | else: 378 | df_raw_tp_res_id = df_raw_tp[df_raw_tp["ID"].isin([res_id])] 379 | return df_raw_tp_res_id 380 | 381 | def check_if_first_frame(self,row, bam_pgn, first_frame_mask,first_frame): 382 | # check if row reflects the first frame of a TP sequence 383 | if self.tp_type == "j1939" and bam_pgn == self.calculate_pgn(row.ID): 384 | first_frame_test = True 385 | elif (row.DataBytes[0] & first_frame_mask) == first_frame: 386 | first_frame_test = True 387 | else: 388 | first_frame_test = False 389 | 390 | return first_frame_test 391 | 392 | def pgn_to_can_id(self,row): 393 | # for J1939, extract PGN and convert to 29 bit CAN ID for use in baseframe 394 | pgn_hex = "".join("{:02x}".format(x) for x in reversed(row.DataBytes[5:8])) 395 | pgn = int(pgn_hex, 16) 396 | can_id = (6 << 26) | (pgn << 8) | row.SA 397 | return can_id 398 | 399 | def get_payload_length(self,row): 400 | if self.tp_type == "uds": 401 | ff_length = (row.DataBytes[0] & 0x0F) << 8 | row.DataBytes[1] 402 | if self.tp_type == "nmea": 403 | ff_length = row.DataBytes[1] 404 | if self.tp_type == "j1939": 405 | ff_length = int("".join("{:02x}".format(x) for x in reversed(row.DataBytes[1:2])),16) 406 | return ff_length 407 | 408 | def combine_tp_frames(self, df_raw): 409 | # main function that reassembles TP frames in df_raw 410 | import pandas as pd 411 | 412 | # if tp_type = "" return original df_raw 413 | if self.tp_type not in ["uds","nmea", "j1939"]: 414 | return df_raw 415 | 416 | # extract protocol specific TP frame info 417 | frame_struct = MultiFrameDecoder.FRAME_STRUCT[self.tp_type] 418 | res_id_list_full = frame_struct["res_id_list"] 419 | bam_pgn = frame_struct["bam_pgn"] 420 | ff_payload_start = frame_struct["ff_payload_start"] 421 | first_frame_mask = 
frame_struct["FIRST_FRAME_MASK"] 422 | first_frame = frame_struct["FIRST_FRAME"] 423 | single_frame_mask = frame_struct["SINGLE_FRAME_MASK"] 424 | single_frame = frame_struct["SINGLE_FRAME"] 425 | conseq_frame_mask = frame_struct["CONSEQ_FRAME_MASK"] 426 | conseq_frame = frame_struct["CONSEQ_FRAME"] 427 | 428 | # split df_raw in two (incl/excl TP frames) 429 | df_raw_tp, df_raw_excl_tp, res_id_list, df_raw_pgns = self.identify_matching_ids(df_raw,res_id_list_full, bam_pgn) 430 | 431 | # initiate new df_raw that will contain both the df_raw excl. TP frames and subsequently all combined TP frames 432 | df_raw = [df_raw_excl_tp] 433 | 434 | # for NMEA, apply PGN decoding outside loop 435 | if self.tp_type == "nmea": 436 | df_raw_tp_pgns = df_raw_tp["ID"].apply(self.calculate_pgn) 437 | else: 438 | df_raw_tp_pgns = None 439 | 440 | # loop through each relevant TP response ID 441 | for res_id in res_id_list: 442 | 443 | # get subset of df_raw_tp containing res_id 444 | df_raw_tp_res_id = self.filter_df_raw_tp(df_raw_tp,df_raw_tp_pgns, res_id) 445 | 446 | # distinguish channels 447 | for channel, df_channel in df_raw_tp_res_id.groupby("BusChannel"): 448 | 449 | # distinguish IDs from PGNs by grouping on ID (or SA for J1939) 450 | for identifier, df_raw_filter in df_channel.groupby(frame_struct["group"]): 451 | base_frame = df_raw_filter.iloc[0] 452 | frame_list = [] 453 | frame_timestamp_list = [] 454 | payload_concatenated = [] 455 | 456 | ff_length = 0xFFF 457 | first_first_frame_test = True 458 | can_id = None 459 | conseq_frame_prev = None 460 | 461 | # iterate through rows in filtered dataframe 462 | for row in df_raw_filter.itertuples(index=True,name='Pandas'): 463 | index = row.Index 464 | first_frame_test = self.check_if_first_frame(row, bam_pgn, first_frame_mask,first_frame) 465 | first_byte = row.DataBytes[0] 466 | 467 | # if single frame, save frame directly (excl. 1st byte) 468 | if self.tp_type != "nmea" and (first_byte & single_frame_mask == single_frame): 469 | new_frame = self.construct_new_tp_frame(base_frame, row.DataBytes, row.ID) 470 | frame_list.append(new_frame.values.tolist()) 471 | frame_timestamp_list.append(index) 472 | 473 | # if first frame, save info from prior multi frame response sequence, 474 | # then initialize a new sequence incl. the first frame payload 475 | elif first_frame_test: 476 | # create a new frame using information from previous iterations 477 | if len(payload_concatenated) >= ff_length: 478 | new_frame = self.construct_new_tp_frame(base_frame, payload_concatenated, can_id) 479 | frame_list.append(new_frame.values.tolist()) 480 | frame_timestamp_list.append(frame_timestamp) 481 | 482 | # reset and start next frame with timestamp & CAN ID from this first frame plus initial payload 483 | conseq_frame_prev = None 484 | frame_timestamp = index 485 | 486 | if self.tp_type == "j1939": 487 | can_id = self.pgn_to_can_id(row) 488 | 489 | ff_length = self.get_payload_length(row) 490 | payload_concatenated = row.DataBytes[ff_payload_start:] 491 | 492 | # if consequtive frame, extend payload with payload excl. 
1st byte 493 | elif (conseq_frame_prev == None) or ((first_byte - conseq_frame_prev) == 1): 494 | conseq_frame_prev = first_byte 495 | payload_concatenated += row.DataBytes[1:] 496 | 497 | 498 | df_raw_res_id_new = pd.DataFrame(frame_list, columns=base_frame.index, index=frame_timestamp_list) 499 | df_raw.append(df_raw_res_id_new) 500 | 501 | df_raw = pd.concat(df_raw,join='outer') 502 | df_raw.index.name = "TimeStamp" 503 | df_raw = df_raw.sort_index() 504 | return df_raw 505 | -------------------------------------------------------------------------------- /utils_db.py: -------------------------------------------------------------------------------- 1 | class SetupInflux: 2 | def __init__(self, influx_url, token, org_id, influx_bucket, res, debug=False, verbose=True): 3 | from influxdb_client import InfluxDBClient 4 | 5 | self.influx_url = influx_url 6 | self.token = token 7 | self.org_id = org_id 8 | self.influx_bucket = influx_bucket 9 | self.debug = debug 10 | self.verbose = verbose 11 | self.res = res 12 | self.client = InfluxDBClient(url=self.influx_url, token=self.token, org=self.org_id, debug=False) 13 | self.test = self.test_influx() 14 | return 15 | 16 | def __del__(self): 17 | self.client.__del__() 18 | 19 | def get_start_times(self, devices, default_start, dynamic): 20 | """Get latest InfluxDB timestamps for devices for use as 'start times' for listing log files from S3""" 21 | from datetime import datetime, timedelta 22 | from dateutil.tz import tzutc 23 | 24 | default_start_dt = datetime.strptime(default_start, "%Y-%m-%d %H:%M:%S").replace(tzinfo=tzutc()) 25 | device_ids = [device.split("/")[1] for device in devices] 26 | start_times = [] 27 | 28 | if dynamic == False or self.test == 0: 29 | for device in device_ids: 30 | last_time = default_start_dt 31 | start_times.append(last_time) 32 | elif self.test != 0: 33 | for device in device_ids: 34 | influx_time = self.client.query_api().query( 35 | f'from(bucket:"{self.influx_bucket}") |> range(start: -100d) |> filter(fn: (r) => r["_measurement"] == "{device}") |> group() |> last()' 36 | ) 37 | 38 | if len(influx_time) == 0: 39 | last_time = default_start_dt 40 | else: 41 | last_time = influx_time[0].records[0]["_time"] 42 | last_time = last_time + timedelta(seconds=2) 43 | 44 | start_times.append(last_time) 45 | 46 | if self.verbose: 47 | print(f"Log files will be fetched for {device} from {last_time}") 48 | 49 | return start_times 50 | 51 | def add_signal_tags(self, df_signal): 52 | """Advanced: This can be used to add custom tags to the signals 53 | based on a specific use case logic. In effect, this will 54 | split the signal into multiple timeseries 55 | """ 56 | tag_columns = ["tag"] 57 | 58 | def event_test(row): 59 | return "event" if row[0] > 1200 else "no event" 60 | 61 | for tag in tag_columns: 62 | df_signal[tag] = df_signal.apply(lambda row: event_test(row), axis=1) 63 | 64 | return tag_columns, df_signal 65 | 66 | def write_signals(self, device_id, df_phys): 67 | """Given a device ID and a dataframe of physical values, 68 | resample and write each signal to a time series database 69 | 70 | :param device_id: ID of device (used as the 'measurement name') 71 | :param df_phys: Dataframe of physical values (e.g. 
as per output of can_decoder) 72 | """ 73 | tag_columns = [] 74 | 75 | if df_phys.empty: 76 | print("Warning: Dataframe is empty, no data written") 77 | return 78 | else: 79 | if self.res != "": 80 | self.write_influx(device_id, df_phys, []) 81 | 82 | else: 83 | for signal, group in df_phys.groupby("Signal")["Physical Value"]: 84 | df_signal = group.to_frame().rename(columns={"Physical Value": signal}) 85 | 86 | # if self.res != "": 87 | # df_signal = df_signal.resample(self.res).ffill().dropna() 88 | 89 | if self.verbose: 90 | print(f"Signal: {signal} (mean: {round(df_signal[signal].mean(),2)} | records: {len(df_signal)} | resampling: {self.res})") 91 | 92 | # tag_columns, df_signal = self.add_signal_tags(df_signal) 93 | 94 | self.write_influx(device_id, df_signal, tag_columns) 95 | 96 | def write_influx(self, name, df, tag_columns): 97 | """Helper function to write signal dataframes to InfluxDB""" 98 | from influxdb_client import WriteOptions 99 | 100 | if self.test == 0: 101 | print("Please check your InfluxDB credentials") 102 | return 103 | 104 | with self.client.write_api( 105 | write_options=WriteOptions( 106 | batch_size=20_000, 107 | flush_interval=1_000, 108 | jitter_interval=0, 109 | retry_interval=5_000, 110 | ) 111 | ) as _write_client: 112 | _write_client.write(self.influx_bucket, record=df, data_frame_measurement_name=name, 113 | data_frame_tag_columns=tag_columns) 114 | 115 | if self.verbose: 116 | print(f"- SUCCESS: {len(df.index)} records of {name} written to InfluxDB\n\n") 117 | 118 | _write_client.__del__() 119 | 120 | def delete_influx(self, device): 121 | """Given a 'measurement' name (e.g. device ID), delete the related data from InfluxDB""" 122 | start = "1970-01-01T00:00:00Z" 123 | stop = "2099-01-01T00:00:00Z" 124 | 125 | delete_api = self.client.delete_api() 126 | delete_api.delete( 127 | start, 128 | stop, 129 | f'_measurement="{device}"', 130 | bucket=self.influx_bucket, 131 | org=self.org_id, 132 | ) 133 | 134 | def test_influx(self): 135 | """Test the connection to your InfluxDB database""" 136 | if self.influx_url == "influx_endpoint": 137 | result = 0 138 | else: 139 | try: 140 | test = self.client.query_api().query(f'from(bucket:"{self.influx_bucket}") |> range(start: -10s)') 141 | result = 1 142 | except Exception as err: 143 | self.print_influx_error(str(err)) 144 | result = 0 145 | 146 | return result 147 | 148 | def print_influx_error(self, err): 149 | warning = "- WARNING: Unable to write data to InfluxDB |" 150 | 151 | if "CERTIFICATE_VERIFY_FAILED" in err: 152 | print(f"{warning} check your influx_url ({self.influx_url})") 153 | elif "organization name" in err: 154 | print(f"{warning} check your org_id ({self.org_id})") 155 | elif "unauthorized access" in err: 156 | print(f"{warning} check your influx_url and token") 157 | elif "could not find bucket" in err: 158 | print(f"{warning} check your influx_bucket ({self.influx_bucket})") 159 | else: 160 | print(err) 161 | --------------------------------------------------------------------------------
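The Lambda handler in deploy-aws-lambda/lambda_function.py only reads the bucket name and object key from the first record of the S3 event it receives, so it can be exercised locally with a hand-built event before deploying. Below is a minimal sketch of such a test, assuming inputs.py has been filled in with valid InfluxDB/S3 details and that AWS credentials are available to s3fs; the bucket name and object key are placeholders.

```python
# Hypothetical local test of lambda_function.lambda_handler (not a file in this repository).
# Assumes inputs.py is filled in and AWS credentials are available to s3fs (anon=False).
from lambda_function import lambda_handler

test_event = {
    "Records": [
        {
            "s3": {
                "bucket": {"name": "your-canedge-bucket"},  # placeholder bucket name
                "object": {"key": "2F6913DB/00000019/00000001-61D9D80A.MF4"},  # placeholder object key
            }
        }
    ]
}

if __name__ == "__main__":
    lambda_handler(test_event)
```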
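utils.py also exposes the illustrative helpers test_signal_threshold and add_custom_sig, which are meant to be adapted to your own use case. A hedged sketch of how they could be called from the processing loop in main.py, right after df_phys = proc.extract_phys(df_raw); the signal names, threshold and combination function are placeholders and must match signals actually present in your DBC output.

```python
# Hypothetical additions to the main.py loop (after extract_phys) - not part of the repository.
from utils import add_custom_sig, test_signal_threshold

# warn if the 'max - min' delta of a decoded signal exceeds a chosen threshold
test_signal_threshold(df_phys, signal="SignalA", threshold=100)

# derive a new signal from two decoded signals via a custom two-argument function
df_phys = add_custom_sig(df_phys, "SignalA", "SignalB", lambda s1, s2: s1 + s2, "SignalSum")
```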
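Finally, once write_signals has pushed data to InfluxDB, the same influxdb_client library used in utils_db.py can verify the result outside Grafana. The sketch below mirrors the Flux query pattern from get_start_times; the connection details, device ID and signal name are placeholders to be replaced with your own values from inputs.py (measurements are named after the device ID and fields after the decoded signal names).

```python
# Hypothetical read-back check (not a file in this repository).
from influxdb_client import InfluxDBClient

client = InfluxDBClient(url="influx_endpoint", token="influx_token", org="influx_org_id")

flux = (
    'from(bucket:"influx_bucket_name")'
    " |> range(start: -1d)"
    ' |> filter(fn: (r) => r["_measurement"] == "2F6913DB")'  # device ID (placeholder)
    ' |> filter(fn: (r) => r["_field"] == "SignalA")'         # decoded signal name (placeholder)
)

for table in client.query_api().query(flux):
    for record in table.records:
        print(record.get_time(), record.get_value())
```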