├── .gitignore
├── LICENSE
├── LOG
│   ├── 2F6913DB
│   │   ├── 00000019
│   │   │   └── 00000001-61D9D80A.MF4
│   │   ├── 00000020
│   │   │   └── 00000001-61D9D830.MF4
│   │   └── 00000021
│   │       └── 00000001-61D9D9BA.MF4
│   └── canmod-gps.dbc
├── README.md
├── canedge_datasource
│   ├── CanedgeFileSystem.py
│   ├── __init__.py
│   ├── alive.py
│   ├── annotations.py
│   ├── enums.py
│   ├── query.py
│   ├── search.py
│   ├── signal.py
│   └── time_range.py
├── canedge_datasource_cli.py
├── canedge_grafana_backend.service
├── dashboard_templates
│   ├── dashboard-template-sample-data.json
│   └── dashboard-template-simple.json
├── install.bat
├── requirements.txt
├── run_local.bat
├── run_s3.bat
├── test
│   ├── test_canedge_time_series.py
│   ├── test_resample.py
│   ├── test_signals.py
│   └── tester_browser.py
└── utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | *css_*
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 CSS-Electronics
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/LOG/2F6913DB/00000019/00000001-61D9D80A.MF4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CSS-Electronics/canedge-grafana-backend/d28b29bc3e6b71605b11ae4c8c29beb2aeae1b91/LOG/2F6913DB/00000019/00000001-61D9D80A.MF4
--------------------------------------------------------------------------------
/LOG/2F6913DB/00000020/00000001-61D9D830.MF4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CSS-Electronics/canedge-grafana-backend/d28b29bc3e6b71605b11ae4c8c29beb2aeae1b91/LOG/2F6913DB/00000020/00000001-61D9D830.MF4
--------------------------------------------------------------------------------
/LOG/2F6913DB/00000021/00000001-61D9D9BA.MF4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CSS-Electronics/canedge-grafana-backend/d28b29bc3e6b71605b11ae4c8c29beb2aeae1b91/LOG/2F6913DB/00000021/00000001-61D9D9BA.MF4
--------------------------------------------------------------------------------
/LOG/canmod-gps.dbc:
--------------------------------------------------------------------------------
1 | VERSION ""
2 |
3 |
4 | NS_ :
5 | NS_DESC_
6 | CM_
7 | BA_DEF_
8 | BA_
9 | VAL_
10 | CAT_DEF_
11 | CAT_
12 | FILTER
13 | BA_DEF_DEF_
14 | EV_DATA_
15 | ENVVAR_DATA_
16 | SGTYPE_
17 | SGTYPE_VAL_
18 | BA_DEF_SGTYPE_
19 | BA_SGTYPE_
20 | SIG_TYPE_REF_
21 | VAL_TABLE_
22 | SIG_GROUP_
23 | SIG_VALTYPE_
24 | SIGTYPE_VALTYPE_
25 | BO_TX_BU_
26 | BA_DEF_REL_
27 | BA_REL_
28 | BA_DEF_DEF_REL_
29 | BU_SG_REL_
30 | BU_EV_REL_
31 | BU_BO_REL_
32 | SG_MUL_VAL_
33 |
34 | BS_:
35 |
36 | BU_:
37 |
38 |
39 | BO_ 3 gnss_pos: 8 Vector__XXX
40 | SG_ PositionAccuracy : 58|6@1+ (1,0) [0|63] "m" Vector__XXX
41 | SG_ Latitude : 1|28@1+ (1e-06,-90) [-90|90] "deg" Vector__XXX
42 | SG_ Longitude : 29|29@1+ (1e-06,-180) [-180|180] "deg" Vector__XXX
43 | SG_ PositionValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX
44 |
45 | BO_ 2 gnss_time: 6 Vector__XXX
46 | SG_ TimeValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX
47 | SG_ TimeConfirmed : 1|1@1+ (1,0) [0|1] "" Vector__XXX
48 | SG_ Epoch : 8|40@1+ (0.001,1577840400) [1577840400|2677352027] "sec" Vector__XXX
49 |
50 | BO_ 5 gnss_attitude: 8 Vector__XXX
51 | SG_ AttitudeValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX
52 | SG_ Roll : 1|12@1+ (0.1,-180) [-180|180] "deg" Vector__XXX
53 | SG_ Pitch : 22|12@1+ (0.1,-90) [-90|90] "deg" Vector__XXX
54 | SG_ Heading : 43|12@1+ (0.1,0) [0|360] "deg" Vector__XXX
55 | SG_ RollAccuracy : 13|9@1+ (0.1,0) [0|50] "deg" Vector__XXX
56 | SG_ PitchAccuracy : 34|9@1+ (0.1,0) [0|50] "deg" Vector__XXX
57 | SG_ HeadingAccuracy : 55|9@1+ (0.1,0) [0|50] "deg" Vector__XXX
58 |
59 | BO_ 6 gnss_odo: 8 Vector__XXX
60 | SG_ DistanceTrip : 1|22@1+ (1,0) [0|4194303] "m" Vector__XXX
61 | SG_ DistanceAccuracy : 23|19@1+ (1,0) [0|524287] "m" Vector__XXX
62 | SG_ DistanceValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX
63 | SG_ DistanceTotal : 42|22@1+ (1,0) [0|4194303] "km" Vector__XXX
64 |
65 | BO_ 1 gnss_status: 1 Vector__XXX
66 | SG_ FixType : 0|3@1+ (1,0) [0|5] "" Vector__XXX
67 | SG_ Satellites : 3|5@1+ (1,0) [0|31] "" Vector__XXX
68 |
69 | BO_ 4 gnss_altitude: 4 Vector__XXX
70 | SG_ AltitudeValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX
71 | SG_ Altitude : 1|18@1+ (0.1,-6000) [-6000|20000] "m" Vector__XXX
72 | SG_ AltitudeAccuracy : 19|13@1+ (1,0) [0|8000] "m" Vector__XXX
73 |
74 | BO_ 8 gnss_geofence: 2 Vector__XXX
75 | SG_ FenceValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX
76 | SG_ FenceCombined : 1|2@1+ (1,0) [0|1] "" Vector__XXX
77 | SG_ Fence1 : 8|2@1+ (1,0) [0|1] "" Vector__XXX
78 | SG_ Fence2 : 10|2@1+ (1,0) [0|1] "" Vector__XXX
79 | SG_ Fence3 : 12|2@1+ (1,0) [0|1] "" Vector__XXX
80 | SG_ Fence4 : 14|2@1+ (1,0) [0|1] "" Vector__XXX
81 |
82 | BO_ 7 gnss_speed: 5 Vector__XXX
83 | SG_ Speed : 1|20@1+ (0.001,0) [0|1048.575] "m/s" Vector__XXX
84 | SG_ SpeedAccuracy : 21|19@1+ (0.001,0) [0|524.287] "m/s" Vector__XXX
85 | SG_ SpeedValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX
86 |
87 | BO_ 9 gnss_imu: 8 Vector__XXX
88 | SG_ AccelerationX : 1|10@1+ (0.125,-64) [-64|63.875] "m/s^2" Vector__XXX
89 | SG_ AccelerationY : 11|10@1+ (0.125,-64) [-64|63.875] "m/s^2" Vector__XXX
90 | SG_ AccelerationZ : 21|10@1+ (0.125,-64) [-64|63.875] "m/s^2" Vector__XXX
91 | SG_ AngularRateX : 31|11@1+ (0.25,-256) [-256|255.75] "deg/s" Vector__XXX
92 | SG_ AngularRateY : 42|11@1+ (0.25,-256) [-256|255.75] "deg/s" Vector__XXX
93 | SG_ AngularRateZ : 53|11@1+ (0.25,-256) [-256|255.75] "deg/s" Vector__XXX
94 | SG_ ImuValid : 0|1@1+ (1,0) [0|1] "" Vector__XXX
95 |
96 |
97 |
98 | CM_ BO_ 3 "GNSS position";
99 | CM_ SG_ 3 PositionAccuracy "Accuracy of position";
100 | CM_ SG_ 3 Latitude "Latitude";
101 | CM_ SG_ 3 Longitude "Longitude";
102 | CM_ SG_ 3 PositionValid "Position validity";
103 | CM_ BO_ 2 "GNSS time";
104 | CM_ SG_ 2 TimeValid "Time validity";
105 | CM_ SG_ 2 TimeConfirmed "Time confirmed";
106 | CM_ SG_ 2 Epoch "Epoch time";
107 | CM_ BO_ 5 "GNSS attitude";
108 | CM_ SG_ 5 AttitudeValid "Attitude validity";
109 | CM_ SG_ 5 Roll "Vehicle roll";
110 | CM_ SG_ 5 Pitch "Vehicle pitch";
111 | CM_ SG_ 5 Heading "Vehicle heading";
112 | CM_ SG_ 5 RollAccuracy "Vehicle roll accuracy";
113 | CM_ SG_ 5 PitchAccuracy "Vehicle pitch accuracy";
114 | CM_ SG_ 5 HeadingAccuracy "Vehicle heading accuracy";
115 | CM_ BO_ 6 "GNSS odometer";
116 | CM_ SG_ 6 DistanceTrip "Distance traveled since last reset";
117 | CM_ SG_ 6 DistanceAccuracy "Distance accuracy (1-sigma)";
118 | CM_ SG_ 6 DistanceTotal "Distance traveled in total";
119 | CM_ BO_ 1 "GNSS information";
120 | CM_ SG_ 1 FixType "Fix type";
121 | CM_ SG_ 1 Satellites "Number of satellites used";
122 | CM_ BO_ 4 "GNSS altitude";
123 | CM_ SG_ 4 AltitudeValid "Altitude validity";
124 | CM_ SG_ 4 Altitude "Altitude";
125 | CM_ SG_ 4 AltitudeAccuracy "Accuracy of altitude";
126 | CM_ BO_ 8 "GNSS geofence(s)";
127 | CM_ SG_ 8 FenceValid "Geofencing status";
128 | CM_ SG_ 8 FenceCombined "Combined (logical OR) state of all geofences";
129 | CM_ SG_ 8 Fence1 "Geofence 1 state";
130 | CM_ SG_ 8 Fence2 "Geofence 2 state";
131 | CM_ SG_ 8 Fence3 "Geofence 3 state";
132 | CM_ SG_ 8 Fence4 "Geofence 4 state";
133 | CM_ BO_ 7 "GNSS speed";
134 | CM_ SG_ 7 Speed "Speed";
135 | CM_ SG_ 7 SpeedAccuracy "Speed accuracy";
136 | CM_ BO_ 9 "GNSS IMU";
137 | CM_ SG_ 9 AccelerationX "X acceleration with a resolution of 0.125 m/s^2";
138 | CM_ SG_ 9 AccelerationY "Y acceleration with a resolution of 0.125 m/s^2";
139 | CM_ SG_ 9 AccelerationZ "Z acceleration with a resolution of 0.125 m/s^2";
140 | CM_ SG_ 9 AngularRateX "X angular rate with a resolution of 0.25 deg/s";
141 | CM_ SG_ 9 AngularRateY "Y angular rate with a resolution of 0.25 deg/s";
142 | CM_ SG_ 9 AngularRateZ "Z angular rate with a resolution of 0.25 deg/s";
143 | VAL_ 3 PositionValid 0 "Invalid" 1 "Valid" ;
144 | VAL_ 2 TimeValid 0 "Invalid" 1 "Valid" ;
145 | VAL_ 2 TimeConfirmed 0 "Unconfirmed" 1 "Confirmed" ;
146 | VAL_ 5 AttitudeValid 0 "Invalid" 1 "Valid" ;
147 | VAL_ 6 DistanceValid 0 "Invalid" 1 "Valid" ;
148 | VAL_ 1 FixType 0 "No fix" 1 "Dead reckoning only" 2 "2D-fix" 3 "3D-fix" 4 "GNSS + dead reckoning combined" 5 "Time only fix" ;
149 | VAL_ 4 AltitudeValid 0 "Invalid" 1 "Valid" ;
150 | VAL_ 8 FenceValid 0 "Invalid" 1 "Valid" ;
151 | VAL_ 8 FenceCombined 0 "Unknown" 1 "Inside" 2 "Outside" ;
152 | VAL_ 8 Fence1 0 "Unknown" 1 "Inside" 2 "Outside" ;
153 | VAL_ 8 Fence2 0 "Unknown" 1 "Inside" 2 "Outside" ;
154 | VAL_ 8 Fence3 0 "Unknown" 1 "Inside" 2 "Outside" ;
155 | VAL_ 8 Fence4 0 "Unknown" 1 "Inside" 2 "Outside" ;
156 | VAL_ 7 SpeedValid 0 "Invalid" 1 "Valid" ;
157 | VAL_ 9 ImuValid 0 "Invalid" 1 "Valid" ;
158 |
159 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Grafana-Backend - Visualize CANedge CAN/LIN Data in Dashboards [LEGACY]
2 |
3 | **This project is now LEGACY and not supported - we recommend using our [Grafana-Athena](https://www.csselectronics.com/pages/telematics-dashboard-open-source) integration.**
4 |
5 | This project enables easy dashboard visualization of log files from the [CANedge](https://www.csselectronics.com/pages/can-bus-hardware-products) CAN/LIN data logger.
6 |
7 | Specifically, a light-weight backend app loads, DBC decodes and parses MDF log files from local disk or an S3 server. This is done 'on demand' in response to query requests sent from a [Grafana dashboard](https://grafana.com/) frontend by end users.
8 |
9 | 
10 |
11 |
12 | ## Backend vs. Writer
13 | We provide two options for integrating your CANedge data with Grafana dashboards:
14 |
15 | The [CANedge Grafana Backend](https://github.com/CSS-Electronics/canedge-grafana-backend) app only processes data 'when needed' by an end user - and requires no database. It is ideal when you have large amounts of data - as you only process the data you need to visualize.
16 |
17 | The [CANedge InfluxDB Writer](https://github.com/CSS-Electronics/canedge-influxdb-writer) processes data in advance (e.g. periodically or on-file-upload) and writes it to a database. It is ideal if dashboard loading speed is critical - but with the downside that data is processed & stored even if it is not used.
18 |
19 | For details incl. 'pros & cons', see our [intro to telematics dashboards](https://www.csselectronics.com/pages/telematics-dashboard-open-source).
20 |
21 | -----
22 |
23 | ## Features
24 |
25 | ```
26 | - allow users to visualize data from all of your devices & log files in Grafana
27 | - data is only processed "on request" - avoiding the need for costly databases
28 | - data can be fetched from local disk or S3
29 | - data can be visualized as soon as log files are uploaded to S3 for 'near real-time updates'
30 | - the backend app can be easily deployed on e.g. your PC or AWS EC2 instance
31 | - plug & play dashboard templates & sample data let you get started quickly
32 | - view log file sessions & splits via Annotations, enabling easy identification of underlying data
33 | - allow end users control over what devices/signals are displayed via flexible Variables
34 | - Support for CAN, CAN FD and LIN, as well as CAN ISO TP (UDS, J1939, NMEA 2000)
35 | ```
36 |
37 | ----
38 |
39 | ## Installation
40 | In this section we detail how to deploy the app on a PC or an AWS EC2 instance.
41 |
42 | Note: We recommend testing the local deployment with our sample data as a first step.
43 |
44 | ----
45 |
46 | ### 1: Deploy the integration locally on your PC
47 |
48 | A local PC deployment is recommended if you wish to load data from an SD card, local disk or MinIO S3.
49 |
50 | - [Watch the step-by-step video](https://canlogger1000.csselectronics.com/img/canedge-grafana-backend-local_v3.mp4)
51 |
52 | #### Deploy the backend app locally
53 | - Install Python 3.9.13 for Windows ([32 bit](https://www.python.org/ftp/python/3.9.13/python-3.9.13.exe)/[64 bit](https://www.python.org/ftp/python/3.9.13/python-3.9.13-amd64.exe)) or [Linux](https://www.python.org/downloads/release/python-3913/) (_enable 'Add to PATH'_)
54 | - Download this project as a zip via the green button, unzip it and enter the folder
55 |
56 | ##### Windows
57 | - Double-click the `install.bat` and then `run_local.bat`
58 |
59 | ##### Linux
60 | - Open the folder with the `requirements.txt` file and enter below in your [command prompt](https://www.youtube.com/watch?v=bgSSJQolR0E&t=47s):
61 |
62 | ```
63 | python -m venv env && source env/bin/activate && pip install -r requirements.txt
64 | python canedge_datasource_cli.py file:///$PWD/LOG --port 8080 --limit 100
65 | ```
66 |
67 | #### Set up Grafana locally
68 | - [Install Grafana locally](https://grafana.com/grafana/download?platform=windows) and enter `http://localhost:3000` in your browser to open Grafana
69 | - Login via the default credentials `admin` / `admin`
70 | - In `Configuration/Plugins` install `SimpleJson` and `TrackMap`
71 | - In `Configuration/DataSources/Add datasource` select `SimpleJson`, set it as `default` with `Timeout = 1000`
72 | - Enter the URL `http://localhost:8080/`, hit `Save & test` and verify that it works
73 | - In `Dashboards/Browse` click `Import` and load the `dashboard-template-sample-data.json` from this repo
74 |
75 | You should now see the sample data visualized in Grafana.
76 |
77 | **Next:** If you aim to work with CANedge2 data from AWS S3, go to step 2 - otherwise go to step 3.
78 |
79 | ----
80 |
81 | ### 2: Load your own data & DBC files
82 | Below we outline how to load your own data & DBC files.
83 |
84 | Note: To easily start the backend, we recommend that you create a copy of the `run_local.bat` file and modify it as needed, as described below.
85 |
86 | #### Load from local disk
87 | - Replace the sample `LOG/` folder with your own `LOG/` folder (or add an absolute path)
88 | - Verify that your data is structured as on the CANedge SD card i.e. `[device_id]/[session]/[split].MF4`
89 | - Add your DBC file(s) to the root of the folder
90 | - Test your setup by double clicking the `run_local.bat` again
91 |
92 | #### Load from S3
93 | - Add your DBC file(s) to the root of your S3 bucket
94 | - Right-click the `run_s3.bat` file to edit it, then enter your S3 `endpoint`, `access_key`, `secret_key` and `bucket`
95 |
96 | ```
97 | python canedge_datasource_cli.py endpoint --port 8080 --limit 100 --s3_ak access_key --s3_sk secret_key --s3_bucket bucket
98 | ```
99 |
100 | - AWS S3 `endpoint` example: `https://s3.eu-central-1.amazonaws.com`
101 | - Google S3 `endpoint` example: `https://storage.googleapis.com`
102 | - MinIO S3 `endpoint` example: `http://192.168.192.1:9000`
103 |
104 | #### Import simplified dashboard template
105 | - To get started, import the `dashboard-template-simple.json` to visualize your own data
106 | - When loaded, browse to a period where you know data exists and verify that you can visualize your signals
107 | - After this, you can optionally start customizing your panels as explained in step 4
108 |
109 | #### Regarding DBC files
110 | You can load as many DBC files as you want without reducing performance, as long as your queries only use one at a time (as is e.g. the case when using the simple dashboard template). However, if your queries need to use multiple DBC files, you may consider 'combining' your DBC files for optimal performance.
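
If you do need to combine DBC files, a minimal sketch of one way to do it is shown below. It uses the [cantools](https://github.com/cantools/cantools) Python library, which is not part of this project and is only used here to illustrate the idea - the file names are placeholders:

```
# hypothetical example: merge two DBC files into a single combined DBC using cantools
import cantools

db = cantools.database.load_file("canmod-gps.dbc")  # first DBC
db.add_dbc_file("your-other-file.dbc")               # hypothetical second DBC
cantools.database.dump_file(db, "combined.dbc")      # write the combined DBC
```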
111 |
112 | #### Regarding compression & encryption
113 | We recommend enabling the CANedge compression as the `MFC` files are 50%+ smaller and thus faster to load.
114 |
115 | You can also process encrypted log files (`MFE/MFM`) by adding your `passwords.json` file in the root of your data folder (see the CLI help for details).
116 |
117 | ----
118 |
119 | ### 3: Deploy the integration on AWS EC2 & Grafana Cloud
120 | An [AWS EC2](https://aws.amazon.com/ec2/) instance is an option if you wish to load data from your AWS S3 bucket, while ensuring the backend is running continuously. It is the recommended setup if you need to share access to the data across multiple users remotely. If you only need to work with the data on your own PC locally, an EC2 instance is most likely not necessary.
121 |
122 | - [Watch the step-by-step video](https://canlogger1000.csselectronics.com/img/canedge-grafana-backend-aws-ec2-cloud_v3.mp4)
123 |
124 | #### Deploy the backend app on AWS EC2
125 |
126 | - Login to AWS and verify that you are in the same region as your S3 bucket (upper right corner)
127 | - Search for `EC2/Instances` and click `Launch instances`
128 | - Select `Ubuntu Server 20.04 LTS (HVM), SSD Volume Type`, `t3.small` and proceed
129 | - In `Key pair (login)` select `Proceed without a key pair`
130 | - In Network settings click Edit and `Add security group rule`
131 | - Go to `Security group rule 2 (TCP, 8)`
132 | - Set `Type` to `Custom TCP`, `Port Range` to `8080` and `Source type` to `Anywhere`
133 | - Launch the instance and wait ~2 min
134 | - Click on your instance and note your IP (the `Public IPv4 address`)
135 | - Click `Connect/Connect` to enter the GUI console, then enter the following:
136 |
137 | ```
138 | sudo apt update && sudo apt install python3 python3-pip python3-venv tmux python-is-python3 -y
139 | git clone https://github.com/CSS-Electronics/canedge-grafana-backend.git && cd canedge-grafana-backend
140 | python -m venv env && source env/bin/activate && pip install -r requirements.txt
141 | tmux
142 | python canedge_datasource_cli.py file:///$PWD/LOG --port 8080 --limit 100
143 | ```
144 |
145 | #### Set up Grafana Cloud
146 | - [Set up](https://grafana.com/auth/sign-up/create-user) a free Grafana Cloud account and log in
147 | - In `Configuration/Plugins` install `SimpleJson` and `TrackMap` (log out and in again)
148 | - In `Configuration/DataSources/Add datasource` select `SimpleJson`, set it as `default` with `Timeout = 1000`
149 | - Replace your datasource URL with the `http://[IP]:[port]` endpoint and click `Save & test`
150 | - In `Dashboards/Browse` click `Import` and load the `dashboard-template-sample-data.json` from this repo
151 |
152 | You should now see the sample data visualized in your imported dashboard. In the AWS EC2 console you can press `ctrl + B` then `D` to detach from the session, allowing it to run even when you close the GUI console.
153 |
154 | **Next:** See step 2 on loading your AWS S3 data and step 5 on deploying the app as a service for production.
155 |
156 |
157 | -----
158 |
159 | ### 4: Customize your Grafana dashboard
160 |
161 | The `dashboard-template-sample-data.json` can be used to identify how to make queries, including the below examples:
162 |
163 | ```
164 | # create a fully customized query that depends on what the user selects in the dropdown
165 | {"device":"${DEVICE}","itf":"${ITF}","chn":"${CHN}","db":"${DB}","signal":"${SIGNAL}"}
166 |
167 | # create a query for a panel that locks a signal, but keeps the device selectable
168 | {"device":"${DEVICE}","itf":"CAN","chn":"CH2","db":"canmod-gps","signal":"Speed"}
169 |
170 | # create a query for parsing multiple signals, e.g. for a TrackMap plot
171 | {"device":"${DEVICE}","itf":"CAN","chn":"CH2","db":"canmod-gps","signal":"(Latitude|Longitude)"}
172 | ```
173 |
174 | #### Bundle queries for multiple panels
175 | When displaying multiple panels in your dashboard, it is critical to set up all queries in a single panel (as in our sample data template). All other panels can then be set up to refer to the original panel by setting the datasource as `-- Dashboard --`. For both the 'query panel' and 'referring panels' you can then use the `Transform` tab to `Filter data by query`. This allows you to specify which query should be displayed in which panel. The end result is that only 1 query is sent to the backend - which means that your CANedge log files are only processed once per update.
176 |
177 |
178 |
179 | #### Set up Grafana Variables
180 | Grafana [Variables](https://grafana.com/docs/grafana/latest/variables/) allow users to dynamically control what is displayed in certain panels via dropdowns. If you load the dashboard templates, you can find a range of supported Variable queries in `Settings/Variables`.
181 |
182 | For example, the `DEVICE` Variable is a `Query` type using SimpleJson as datasource and the query `{"search":"device_name"}`. This will list all CANedge device serial numbers in the source folder and add the 'meta' field from the last log file header of each device.
183 |
184 | Replacing `device_name` with `device` displays only the device ID. If you want to add a hardcoded list of device names, you can do so by using the type `Custom` and adding `name_1 : id_1, name_2 : id_2` in the values field, where the names reflect the names to be displayed in the dropdown, while the IDs reflect the serial numbers of the CANedge devices. If you have a large number of CANedge devices, using either the `device` query or the custom approach can increase performance.
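
For reference, a Variable query is simply posted to the backend's `/search` endpoint. A minimal sketch of what such a request looks like is shown below (assuming the backend runs locally on port 8080 as in step 1 - the exact response depends on your devices):

```
import requests

# same query string as used by the DEVICE Variable in the dashboard templates
resp = requests.post("http://localhost:8080/search", json={"target": '{"search": "device_name"}'})

# returns a list like [{"text": "2F6913DB <meta comment>", "value": "2F6913DB"}, ...]
print(resp.json())
```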
185 |
186 |
187 |
188 |
189 | #### Set up Grafana Annotations
190 |
191 | Annotations can be used to display when a new log file 'session' or 'split' occurs, as well as display the log file name. This makes it easy to identify the log files behind a specific time period - and then find these via [CANcloud](https://canlogger.csselectronics.com/canedge-getting-started/ce2/transfer-data/server-tools/cancloud-intro/) or [TntDrive](https://canlogger.csselectronics.com/canedge-getting-started/ce2/transfer-data/server-tools/other-s3-tools/) for further processing.
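
The annotation queries follow the same JSON style as the panel queries - for example (using the sample data device ID):

```
# annotate the start of each new log file session for a device
{"annotation":"session", "device":"2F6913DB"}

# annotate every log file split for a device
{"annotation":"split", "device":"2F6913DB"}
```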
192 |
193 |
194 | #### Regarding performance & stability
195 | If a request is initiated while the backend is already processing a query, it'll cause a `501` error and the in-progress query will be cancelled. This helps avoid generating long queues of requests when users e.g. zoom out quickly.
196 |
197 | Further, the backend supports the `--limit` input, specifying how much log file data can be requested in one query - by default set at 100 MB. If a query exceeds this, it'll get aborted when the limit is reached. This helps avoid users initiating extreme queries of e.g. several GB.
198 |
199 | Note also that loading time increases when displaying long time periods (as the data for the period is processed in real-time).
200 |
201 | For optimal loading speed, we recommend using a small log file split size (e.g. 1 to 10 MB uncompressed) and that you minimize your logs via filters/prescalers.
202 |
203 | ----
204 |
205 | ### 5: Move to a production setup
206 |
207 | ##### Managing your EC2 tmux session
208 |
209 | Below commands are useful in managing your `tmux` session while you're still testing your deployment.
210 |
211 | - `tmux`: Start a session
212 | - `tmux ls`: List sessions
213 | - `tmux attach`: Re-attach to session
214 | - `tmux kill-session`: Stop session
215 |
216 | #### Deploy your app as an EC2 service for production
217 |
218 | The above setup is suitable for development & testing. Once you're ready to deploy for production, you may prefer to set up a service. This ensures that your app automatically restarts after an instance reboot or a crash. To set it up as a service, follow the below steps:
219 |
220 | - Ensure you've followed the previous EC2 steps incl. the virtual environment
221 | - Update the `ExecStart` line in the `canedge_grafana_backend.service` 'unit file' with your S3 details
222 | - Upload the modified file to a location with a public URL
223 | - In your EC2 instance, use below commands to deploy the file
224 |
225 | ```
226 | sudo wget -N [your_file_url]
227 | sudo cp canedge_grafana_backend.service /etc/systemd/system/
228 | sudo systemctl daemon-reload
229 | sudo systemctl start canedge_grafana_backend
230 | sudo systemctl enable canedge_grafana_backend
231 | sudo journalctl -f -u canedge_grafana_backend
232 | ```
233 |
234 | The service should now be deployed, which you can verify via the console output. If you need to make updates to your unit file, simply repeat the above. You can stop the service via `sudo systemctl stop [service]`.
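
For reference, a systemd unit file for this kind of deployment typically looks roughly like the sketch below. This is not the shipped `canedge_grafana_backend.service` file, just an illustration - the paths and S3 details are placeholders that you would replace with your own:

```
[Unit]
Description=CANedge Grafana backend
After=network.target

[Service]
WorkingDirectory=/home/ubuntu/canedge-grafana-backend
ExecStart=/home/ubuntu/canedge-grafana-backend/env/bin/python canedge_datasource_cli.py endpoint --port 8080 --limit 100 --s3_ak access_key --s3_sk secret_key --s3_bucket bucket
Restart=always

[Install]
WantedBy=multi-user.target
```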
235 |
236 | #### Regarding EC2 costs
237 | You can find details on AWS EC2 pricing [here](https://aws.amazon.com/ec2/pricing/on-demand/). A `t3.small` instance typically costs ~0.02$/hour (~15-20$/month). We recommend that you monitor usage during your tests early on to ensure that no unexpected cost developments occur. Note also that you do not pay for the data transfer from S3 into EC2 if deployed within the same region.
238 |
239 | #### Regarding public EC2 IP
240 | Note that rebooting your EC2 instance will change your endpoint IP - and thus you'll need to update your datasource. There are methods to set a fixed IP, though these are out of scope for this README.
241 |
242 | #### Regarding EC2 memory
243 | The backend will use RAM both for storing dataframes when processing log files and for caching. The required RAM depends on your log file size and DBC - we recommend at least 2 GB of RAM for most use cases (which e.g. the `t3.small` has). On AWS EC2, the default behavior will be to 'kill' the process if RAM usage exceeds available memory. As an alternative to this behavior, you can consider using a [swap file](https://wiki.archlinux.org/title/Swap#Swap_file).
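
A minimal sketch of adding a 2 GB swap file on Ubuntu is shown below (standard Linux commands - see the linked article for details and for making the swap file persistent across reboots):

```
sudo fallocate -l 2G /swapfile
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile
```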
244 |
245 |
246 | ----
247 |
248 | ### Other comments
249 |
250 | #### Optional input arguments
251 |
252 | The CLI takes a number of optional input arguments - including below:
253 |
254 | - `limit`: Set a max limit (MB of MF4 logs) on how much data is processed in one query (default: `100 MB`)
255 | - `tp_type`: Set to `uds`, `j1939` or `nmea` to enable multiframe decoding (default: Disabled)
256 | - `loglevel`: Set the console detail level: `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` (default: `INFO`)
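
As an example, a local run that enables UDS multiframe decoding and verbose logging could look roughly as below (assuming the optional arguments use the same `--name` form as `--port` and `--limit`):

```
python canedge_datasource_cli.py file:///$PWD/LOG --port 8080 --limit 100 --tp_type uds --loglevel DEBUG
```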
257 |
258 | #### Port forwarding a local deployment
259 |
260 | If you want to access the data remotely, you can set up port forwarding. Below we outline how to port forward the backend app for use as a datasource in Grafana Cloud - but you could of course also port forward your local Grafana dashboard directly via port `3000`.
261 |
262 | - Set up [port forwarding](https://portforward.com/) on your WiFi router for port `8080`
263 | - Run the app again (you may need to allow access via your firewall)
264 | - Find your [public IP](https://www.whatismyip.com/) to get your endpoint as: `http://[IP]:[port]` (e.g. `http://5.105.117.49:8080/`)
265 | - In Grafana, add your new endpoint URL and click `Save & test`
266 |
267 | ----
268 |
269 | ### Pending tasks
270 | Below is a list of pending items:
271 |
272 | - Optimize for speed (massive improvements to be expected in later versions)
273 | - Optimize Flask/Waitress session management for stability
274 | - Optimize caching/memory usage for stability
275 | - Improve performance for multiple DBC files and log files
276 | - Update code/guide for TLS-enabled deployment
277 | - Provide guidance on how to best scale the app for multiple front-end users
278 | - Determine if using `Browser` in SimpleJson datasource improves performance (requires TLS)
279 |
--------------------------------------------------------------------------------
/canedge_datasource/CanedgeFileSystem.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | from canedge_browser import RelativeFileSystem
4 |
5 |
6 | class CanedgeFileSystem(RelativeFileSystem):
7 | """Extends the RelativeFileSystem class with CANedge specific methods
8 | """
9 |
10 | def __init__(self, *args, **kwargs):
11 | super().__init__(*args, **kwargs)
12 |
13 | def get_device_ids(self, reverse: bool = False) -> str:
14 | """Get device IDs """
15 | for elm in sorted(self.listdir("/", detail=False), reverse=reverse):
16 | if self.isdir(elm):
17 | device, _, _, _ = self.path_to_pars(elm)
18 | if device is not None:
19 | yield device
20 |
21 | def get_device_sessions(self, device: str, reverse: bool = False) -> (str, str):
22 | """Get sessions of device ID"""
23 | for elm in sorted(self.listdir(os.path.join("/", device), detail=False), reverse=reverse):
24 | if self.isdir(elm):
25 | device, session, _, _ = self.path_to_pars(elm)
26 | if None not in [device, session]:
27 | yield session, elm
28 |
29 | def get_device_splits(self, device: str, session: str, reverse: bool = False) -> (str, str):
30 | """Get splits of device ID and session"""
31 | for elm in sorted(self.listdir(os.path.join("/", device, session), detail=False), reverse=reverse):
32 | if self.isfile(elm):
33 | device, session, split, _ = self.path_to_pars(elm)
34 | if None not in [device, session, split]:
35 | yield split, elm
36 |
37 | def get_device_log_files(self, device, reverse: bool = False):
38 | """Gets all device log files. Note that this can be expensive"""
39 | for session, _ in self.get_device_sessions(device, reverse=reverse):
40 | for split, log_file in self.get_device_splits(device, session, reverse=reverse):
41 | yield log_file, session, split
42 |
43 | def path_to_pars(self, path):
44 | """Matches as much as possible of path to CANedge pars (device id, session, split, extension)"""
45 | pattern = r"^(?P<device_id>[0-9A-F]{8})?((/)(?P<session_no>\d{8}))?((/)(?P<split_no>\d{8})(?:-[0-9A-F]{8}){0,1}(?P<ext>\.(MF4|MFC|MFM|MFE)))?$"
46 | match = re.match(pattern, path, re.IGNORECASE)
47 | if match:
48 | return match.group("device_id"), match.group("session_no"), match.group("split_no"), match.group("ext")
49 | else:
50 | return None, None, None, None
51 |
--------------------------------------------------------------------------------
/canedge_datasource/__init__.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, request, abort
2 | from flask_caching import Cache
3 | from fsspec import AbstractFileSystem
4 | from waitress import serve
5 |
6 | import logging
7 | logger = logging.getLogger(__name__)
8 |
9 | # Flask app
10 | app = Flask(__name__, instance_relative_config=True)
11 |
12 | # Flask app cache
13 | cache = Cache(config={'CACHE_TYPE': 'SimpleCache', 'CACHE_THRESHOLD': 25})
14 |
15 |
16 | def start_server(fs: AbstractFileSystem, dbs: [dict], passwords: [dict], port: int, limit_mb: int, tp_type: str):
17 | """
18 | Start server.
19 | :param fs: FS mounted in CANedge "root"
20 | :param dbs: List of databases
21 | :param passwords: List of log file passwords
22 | :param port: Port of the datasource server
23 | :param limit_mb: Limit amount of data to process
24 | :param tp_type: Type of ISO TP (multiframe) data to handle (uds, j1939, nmea)
25 | """
26 |
27 | # TODO: Not sure if this is the preferred way to share objects with the blueprints
28 | # Init processing flag
29 | app.processing = False
30 |
31 | # Add the shared fs and dbs to the app context
32 | app.fs = fs
33 | app.dbs = dbs
34 | app.passwords = passwords
35 | app.limit_mb = limit_mb
36 | app.tp_type = tp_type
37 |
38 | # Create cache for faster access on repeated calls
39 | cache.init_app(app)
40 |
41 | # Set simplecache logging level to WARNING to avoid heavy DEBUG level logging
42 | logging.getLogger('flask_caching.backends.simplecache').setLevel(logging.WARNING)
43 |
44 | # Register blueprints
45 | from canedge_datasource.alive import alive
46 | app.register_blueprint(alive)
47 |
48 | from canedge_datasource.query import query
49 | app.register_blueprint(query)
50 |
51 | from canedge_datasource.annotations import annotations
52 | app.register_blueprint(annotations)
53 |
54 | from canedge_datasource.search import search
55 | app.register_blueprint(search)
56 |
57 | # Use waitress to serve application
58 | serve(app, host='0.0.0.0', port=port, ident="canedge-grafana-backend")
59 |
60 |
61 | @app.before_request
62 | def before_request():
63 |
64 | logger.debug(f"Request: {request.method} {request.path}, {request.data}")
65 |
66 | # Limit load on /query endpoint
67 | if request.path == "/query":
68 | if app.processing is True:
69 | logger.info("Server busy, skipping query")
70 | abort(501)
71 | else:
72 | app.processing = True
73 |
74 |
75 | @app.after_request
76 | def after_request(response):
77 |
78 | logger.debug(f"Response: {response.status}")
79 |
80 | # Release query load limit
81 | if request.path == "/query":
82 | app.processing = False
83 |
84 | return response
85 |
--------------------------------------------------------------------------------
/canedge_datasource/alive.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint
2 |
3 | alive = Blueprint('alive', __name__)
4 |
5 | @alive.route('/',methods=['GET'])
6 | def alive_view():
7 | """
8 | Lets the frontend know that the backend is working
9 | """
10 | return "OK"
--------------------------------------------------------------------------------
/canedge_datasource/annotations.py:
--------------------------------------------------------------------------------
1 | import json
2 | import canedge_browser
3 | import mdf_iter
4 | from flask import Blueprint, jsonify, request
5 | from flask import current_app as app
6 | from canedge_datasource import cache
7 | from canedge_datasource.time_range import parse_time_range
8 |
9 | import logging
10 | logger = logging.getLogger(__name__)
11 |
12 | annotations = Blueprint('annotations', __name__)
13 |
14 |
15 | @annotations.route('/annotations', methods=['POST'])
16 | def annotations_view():
17 | """
18 | {"annotation":[NAME], [OPTIONAL]}
19 |
20 | Examples:
21 | {"annotation":"session", "device":"AABBCCDD"}
22 | {"annotation":"split", "device":"AABBCCDD"}
23 | """
24 |
25 | # Caching
26 | @cache.memoize(timeout=50)
27 | def annotations_cache(req):
28 |
29 | res = []
30 |
31 | query_req = req["annotation"].get("query", "")
32 | try:
33 | annotation_req = json.loads(query_req)
34 | except ValueError as e:
35 | logger.warning(f"Annotation parse fail: {query_req}")
36 | raise
37 |
38 | if "annotation" not in annotation_req:
39 | logger.warning(f"Unknown annotation request: {query_req}")
40 | raise ValueError
41 |
42 | if annotation_req["annotation"] not in ["session", "split"]:
43 | logger.warning(f"Unknown annotation request: {annotation_req['annotation']}")
44 | raise ValueError
45 |
46 | if "device" not in annotation_req:
47 | logger.warning("Unknown annotation device")
48 | raise ValueError
49 |
50 | # Get time interval to annotate
51 | start_date, stop_date = parse_time_range(req["range"]["from"], req["range"]["to"])
52 |
53 | # Get log files in time interval
54 |
55 | log_files = canedge_browser.get_log_files(app.fs, annotation_req["device"], start_date=start_date,
56 | stop_date=stop_date, passwords=app.passwords)
57 |
58 | for log_file in log_files:
59 |
60 | # Parse log file path
61 | device_id, session_no, split_no, ext = app.fs.path_to_pars(log_file)
62 |
63 | if None in [device_id, session_no, split_no, ext]:
64 | continue
65 |
66 | # Only generate annotation if annotation is split or annotation is session with first split file
67 | if not ((annotation_req["annotation"] == "split") or
68 | (annotation_req["annotation"] == "session" and int(split_no, 10) == 1)):
69 | continue
70 |
71 | # Get file start time
72 | with app.fs.open(log_file, "rb") as handle:
73 | mdf_file = mdf_iter.MdfFile(handle, passwords=app.passwords)
74 | log_file_start_timestamp_ns = mdf_file.get_first_measurement()
75 |
76 | res.append({
77 | "text": f"{log_file}\n"
78 | f"Session: {int(session_no, 10)}\n"
79 | f"Split: {int(split_no, 10)}\n"
80 | f"Size: {app.fs.size(log_file) >> 20} MB",
81 | "time": log_file_start_timestamp_ns / 1000000,
82 | })
83 |
84 | return jsonify(res)
85 |
86 | try:
87 | res = annotations_cache(request.get_json())
88 | except Exception as e:
89 | logger.warning(f"Failed to annotate: {e}")
90 | res = jsonify([])
91 | finally:
92 | return res
93 |
--------------------------------------------------------------------------------
/canedge_datasource/enums.py:
--------------------------------------------------------------------------------
1 | from enum import Enum, auto, IntEnum
2 |
3 |
4 | class CanedgeInterface(Enum):
5 |
6 | def __str__(self):
7 | return str(self.name)
8 |
9 | CAN = auto()
10 | LIN = auto()
11 |
12 |
13 | class CanedgeChannel(IntEnum):
14 |
15 | def __str__(self):
16 | return str(self.name)
17 |
18 | CH1 = 1
19 | CH2 = 2
20 |
21 |
22 | class SampleMethod(IntEnum):
23 |
24 | def __str__(self):
25 | return str(self.name)
26 |
27 | NEAREST = auto()
28 | MAX = auto()
29 | MIN = auto()
30 |
--------------------------------------------------------------------------------
/canedge_datasource/query.py:
--------------------------------------------------------------------------------
1 | import json
2 | from datetime import datetime
3 | from enum import IntEnum, auto
4 | from flask import Blueprint, jsonify, request
5 | from flask import current_app as app
6 | from canedge_datasource import cache
7 | from canedge_datasource.enums import CanedgeInterface, CanedgeChannel, SampleMethod
8 | from canedge_datasource.signal import SignalQuery, time_series_phy_data, table_raw_data, table_fs
9 | from canedge_datasource.time_range import parse_time_range
10 |
11 | import logging
12 | logger = logging.getLogger(__name__)
13 |
14 | query = Blueprint('query', __name__)
15 |
16 |
17 | class RequestType(IntEnum):
18 | """
19 | Defines what the user is requesting.
20 | """
21 | DATA = auto()
22 | INFO = auto()
23 |
24 |
25 | def _json_target_decode(dct):
26 | """Target json object decoder"""
27 | if "itf" in dct:
28 | dct["itf"] = CanedgeInterface[dct["itf"].upper()]
29 | if "chn" in dct:
30 | dct["chn"] = CanedgeChannel[dct["chn"].upper()]
31 | if "db" in dct:
32 | dct["db"] = dct["db"].lower()
33 | if "method" in dct:
34 | dct["method"] = SampleMethod[dct["method"].upper()]
35 | if "signal" in dct:
36 | # Grafana json plugin uses (X|Y|Z) to delimit multiple selections. Split to array
37 | dct["signal"] = dct["signal"].replace("(", "").replace(")", "").split("|")
38 | if "type" in dct:
39 | dct["type"] = RequestType[dct["type"].upper()]
40 | return dct
41 |
42 | def _json_decode_target(target):
43 | # Decode target (the query entered by the user formatted as json)
44 | try:
45 | return json.loads(target, object_hook=_json_target_decode)
46 | except KeyError as e:
47 | # Handle invalid enum mapping errors (key not exist)
48 | logger.warning(f"Invalid target {e}")
49 | return None
50 | except Exception as e:
51 | raise
52 |
53 | @query.route('/query', methods=['POST'])
54 | def query_view():
55 | """
56 | {"query":[NAME], [OPTIONAL]}
57 |
58 | The user query is stored in the "target" field.
59 |
60 | Table request example:
61 | {
62 | "panelId":1,
63 | "range": {
64 | "from": "2020-10-28T14:33:57.732Z",
65 | "to": "2020-10-28T15:01:25.048Z"
66 | },
67 | "intervalMs": 5000,
68 | "targets": [
69 | {
70 | "target": "...",
71 | "type": "table"
72 | }
73 | ],
74 | "maxDataPoints": 421
75 | }
76 |
77 | Time-series request example:
78 | {
79 | "panelId":1,
80 | "range": {
81 | "from": "2020-10-28T14:33:57.732Z",
82 | "to": "2020-10-28T15:01:25.048Z"
83 | },
84 | "intervalMs": 2000,
85 | "targets": [
86 | {
87 | "target": "...",
88 | "type": "timeserie"
89 | }
90 | ],
91 | "maxDataPoints": 831
92 | }
93 |
94 | The result of multiselect variables is formatted as e.g. "(AccelerationX|AccelerationY|AccelerationZ)"
95 |
96 | If one panel contains several queries, then each of these becomes an element in "targets". Separate panels generate
97 | separate independent http requests - each with a unique "panelId".
98 |
99 | """
100 |
101 | # Caching on a request level. Drastically improves performance when the same panel is loaded twice - e.g. when
102 | # annotations are enabled/disabled without changing the view.
103 | @cache.memoize(timeout=50)
104 | def query_cache(req):
105 |
106 | res = []
107 |
108 | # Get query time interval
109 | start_date, stop_date = parse_time_range(req["range"]["from"], req["range"]["to"])
110 |
111 | # Get panel type (targets with mixed types not supported)
112 | panel_types = [x.get("type", "timeseries") for x in req["targets"]]
113 |
114 | if all(x == panel_types[0] for x in panel_types):
115 |
116 | if panel_types[0] in ["timeseries", "timeserie"]:
117 | res = _query_time_series(req, start_date, stop_date)
118 | elif panel_types[0] in ["table"]:
119 | res = _query_table(req, start_date, stop_date)
120 |
121 | return res
122 |
123 | # Get request
124 | req_in = request.get_json()
125 |
126 | # Drop unused and changing keys to improve caching (only works on identical requests)
127 | req_in.pop('requestId', None)
128 | req_in.pop('startTime', None)
129 |
130 | return jsonify(query_cache(req_in))
131 |
132 |
133 | def _query_time_series(req: dict, start_date: datetime, stop_date: datetime) -> list:
134 |
135 | # Loop all requested targets
136 | signal_queries = []
137 | for elm in req["targets"]:
138 |
139 | # Decode target
140 | target_req = _json_decode_target(elm["target"])
141 | if target_req is None:
142 | logger.warning(f"Failed to query target: {elm['target']}")
143 | continue
144 |
145 | # Fields required for series
146 | if not all(x in target_req for x in ["device", "itf", "chn", "db", "signal"]):
147 | logger.warning(f"Target missing required fields: {target_req}")
148 | continue
149 |
150 | # Check that DB is known
151 | if target_req["db"] not in app.dbs.keys():
152 | logger.warning(f"Unknown DB: {target_req['db']}")
153 | continue
154 |
155 | # If multiple signals in request, add each as signal query
156 | for signal in target_req["signal"]:
157 | # Provide a readable unique target name (the list of signals is replaced by the specific signal)
158 | target_name = ":".join([str(x) for x in dict(target_req, signal=signal).values()])
159 |
160 | signal_queries.append(SignalQuery(refid=elm["refId"],
161 | target=target_name,
162 | device=target_req["device"],
163 | itf=target_req["itf"],
164 | chn=target_req["chn"],
165 | db=app.dbs[target_req["db"]]["db"],
166 | signal_name=signal,
167 | interval_ms=int(req["intervalMs"]),
168 | method=target_req.get("method", SampleMethod.NEAREST)))
169 |
170 | # Get signals
171 | return time_series_phy_data(fs=app.fs,
172 | signal_queries=signal_queries,
173 | start_date=start_date,
174 | stop_date=stop_date,
175 | limit_mb=app.limit_mb,
176 | passwords=app.passwords,
177 | tp_type=app.tp_type)
178 |
179 | def _query_table(req: dict, start_date: datetime, stop_date: datetime) -> list:
180 |
181 | res = []
182 |
183 | # Currently, only a single table target supported
184 | elm = req["targets"][0]
185 | if len(req["targets"]) > 1:
186 | logger.warning(f"Table query with multiple targets not supported")
187 |
188 | # Decode target
189 | target_req = _json_decode_target(elm["target"])
190 | if target_req is None:
191 | logger.warning(f"Failed to query target: {elm['target']}")
192 | return res
193 |
194 | # Fields required for table
195 | if "device" in target_req:
196 |
197 | # Get request type, data or info
198 | request_type = target_req.get("type", RequestType.DATA)
199 |
200 | if request_type is RequestType.DATA:
201 | res = table_raw_data(fs=app.fs,
202 | device=target_req["device"],
203 | start_date=start_date,
204 | stop_date=stop_date,
205 | max_data_points=req["maxDataPoints"],
206 | passwords=app.passwords)
207 |
208 | elif request_type is RequestType.INFO:
209 | res = table_fs(fs=app.fs,
210 | device=target_req["device"],
211 | start_date=start_date,
212 | stop_date=stop_date,
213 | max_data_points=req["maxDataPoints"],
214 | passwords=app.passwords)
215 |
216 | return res
217 |
--------------------------------------------------------------------------------
/canedge_datasource/search.py:
--------------------------------------------------------------------------------
1 | import json
2 | import mdf_iter
3 | from flask import Blueprint, jsonify, request
4 | from flask import current_app as app
5 | from canedge_datasource import cache
6 | from canedge_datasource.enums import CanedgeInterface, CanedgeChannel, SampleMethod
7 |
8 | import logging
9 | logger = logging.getLogger(__name__)
10 |
11 | search = Blueprint('search', __name__)
12 |
13 |
14 | @search.route('/search', methods=['POST'])
15 | def search_view():
16 | """
17 | {"search":[NAME], [OPTIONAL]}
18 | :return:
19 | """
20 |
21 | # Caching. Search calls are repeated each time a panel is loaded. Caching reduces communication with the backend
22 | @cache.memoize(timeout=50)
23 | def search_cache(req):
24 |
25 | def get_logfile_comment(log_file):
26 | comment = ""
27 | with app.fs.open(log_file, "rb") as handle:
28 | try:
29 | meta_data = mdf_iter.MdfFile(handle, passwords=app.passwords).get_metadata()
30 | comment = meta_data.get("HDcomment.File Information.comment", {}).get("value_raw").strip()
31 | except:
32 | logger.warning("Could not extract meta data from log file")
33 | pass
34 | return comment
35 |
36 | res = []
37 |
38 | target = req.get("target", "")
39 | try:
40 | req = json.loads(target)
41 | except ValueError as e:
42 | logger.warning(f"Search parse fail: {target}")
43 | raise
44 |
45 | if "search" in req:
46 | if req["search"] == "device":
47 | # Return list of devices
48 | res = list(app.fs.get_device_ids())
49 | elif req["search"] == "device_name":
50 | # Return list of device ids and meta comments (slow)
51 | for device in app.fs.get_device_ids():
52 | # Get most recent log file
53 | try:
54 | log_file, _, _ = next(app.fs.get_device_log_files(device=device, reverse=True), None)
55 | # Get log file comment
56 | if log_file is not None:
57 | comment = " " + get_logfile_comment(log_file)
58 | else:
59 | comment = ""
60 | except:
61 | print(f"Unable to list log files for {device} - review folder structure and log file names")
62 | comment = ""
63 |
64 | res.append({"text": f"{device}{comment}", "value": device})
65 | elif req["search"] == "itf":
66 | # Return list of interfaces
67 | res = [x.name for x in CanedgeInterface]
68 | elif req["search"] == "chn":
69 | # Return list channels
70 | res = [x.name for x in CanedgeChannel]
71 | elif req["search"] == "db":
72 | # Return list of loaded DBs
73 | res = list(app.dbs.keys())
74 | elif req["search"] == "method":
75 | # Return list of sampling methods
76 | res = [x.name for x in SampleMethod]
77 | elif req["search"] == "signal" and "db" in req:
78 | # Return list of signals in db
79 | db_name = req["db"].lower()
80 | if db_name in app.dbs.keys():
81 | res = app.dbs[db_name]["signals"]
82 | else:
83 | logger.warning(f"Unknown search: {req}")
84 |
85 | return jsonify(res)
86 |
87 | try:
88 | res = search_cache(request.get_json())
89 | except Exception as e:
90 | logger.warning(f"Failed to search: {e}")
91 | res = jsonify([])
92 | finally:
93 | return res
94 |
--------------------------------------------------------------------------------
/canedge_datasource/signal.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from can_decoder import SignalDB
3 | from itertools import groupby
4 | import numpy as np
5 | import pandas as pd
6 | import canedge_browser
7 | import can_decoder
8 | import mdf_iter
9 | from datetime import datetime
10 | from utils import MultiFrameDecoder
11 |
12 | from canedge_datasource import cache
13 | from canedge_datasource.enums import CanedgeInterface, CanedgeChannel, SampleMethod
14 |
15 | import logging
16 | logger = logging.getLogger(__name__)
17 |
18 |
19 | @dataclass
20 | class SignalQuery:
21 |
22 | def __str__(self):
23 | return (f"{self.refid}, {self.target}, {self.device}, {self.itf.name}, {self.chn.name}, {self.signal_name}, "
24 | f"{self.interval_ms}, {self.method.name}")
25 |
26 | refid: str
27 | target: str
28 | device: str
29 | itf: CanedgeInterface
30 | chn: CanedgeChannel
31 | db: SignalDB
32 | signal_name: str
33 | interval_ms: int
34 | method: SampleMethod = SampleMethod.NEAREST
35 |
36 |
37 | def table_fs(fs, device, start_date: datetime, stop_date: datetime, max_data_points, passwords) -> list:
38 | """
39 | Returns a list of log files as table
40 | """
41 |
42 | # Find log files
43 | log_files = canedge_browser.get_log_files(fs, device, start_date=start_date, stop_date=stop_date,
44 | passwords=passwords)
45 |
46 | rows = []
47 | for log_file in log_files:
48 |
49 | # Get file start time
50 | with fs.open(log_file, "rb") as handle:
51 | mdf_file = mdf_iter.MdfFile(handle, passwords=passwords)
52 | start_epoch_ms = mdf_file.get_first_measurement() / 1000000
53 | meta_data = mdf_file.get_metadata()
54 |
55 | # Get size
56 | size_mb = fs.size(log_file) >> 20
57 | session = meta_data.get("HDcomment.File Information.session", {}).get("value_raw")
58 | split = meta_data.get("HDcomment.File Information.split", {}).get("value_raw")
59 | config_crc = meta_data.get("HDcomment.Device Information.config crc32 checksum", {}).get("value_raw")
60 | hw_rev = meta_data.get("HDcomment.Device Information.hardware version", {}).get("value_raw")
61 | fw_rev = meta_data.get("HDcomment.Device Information.firmware version", {}).get("value_raw")
62 | storage_free = meta_data.get("HDcomment.Device Information.storage free", {}).get("value_raw")
63 | storage_total = meta_data.get("HDcomment.Device Information.storage total", {}).get("value_raw")
64 | comment = meta_data.get("HDcomment.File Information.comment", {}).get("value_raw").strip()
65 |
66 | storage_mb = ""
67 | if storage_free is not None and storage_total is not None:
68 | storage_mb = f"{int(storage_total)-int(storage_free)>>10}/{int(storage_total)>>10}"
69 |
70 | rows.append([start_epoch_ms, device, session, split, size_mb, config_crc, hw_rev, fw_rev, storage_mb, log_file, comment])
71 |
72 | # {"type": "info", "device":"79A2DD1A"}
73 |
74 | if len(rows) >= max_data_points:
75 | break
76 |
77 | res = [
78 | {
79 | "type": "table",
80 | "columns": [
81 | {"text": "TIME", "type": "time"},
82 | {"text": "DEVICE", "type": "string"},
83 | {"text": "SESSION", "type": "string"},
84 | {"text": "SPLIT", "type": "string"},
85 | {"text": "SIZE [MB]", "type": "string"},
86 | {"text": "CONFIG CRC", "type": "string"},
87 | {"text": "HW", "type": "string"},
88 | {"text": "FW", "type": "string"},
89 | {"text": "STORAGE [MB]", "type": "string"},
90 | {"text": "NAME", "type": "string"},
91 | {"text": "META", "type": "string"},
92 | ],
93 | "rows": rows
94 | }
95 | ]
96 | return res
97 |
98 |
99 | def table_raw_data(fs, device, start_date: datetime, stop_date: datetime, max_data_points, passwords) -> list:
100 | """
101 | Returns raw log file data as table
102 | """
103 |
104 | # Find log files
105 | log_files = canedge_browser.get_log_files(fs, device, start_date=start_date, stop_date=stop_date,
106 | passwords=passwords)
107 |
108 | # Load log files one at a time until max_data_points
109 | df_raw = pd.DataFrame()
110 | for log_file in log_files:
111 |
112 | _, df_raw_can, df_raw_lin, = _load_log_file(fs, log_file, [CanedgeInterface.CAN, CanedgeInterface.LIN],
113 | passwords)
114 |
115 | # Add interface column
116 | df_raw_can['ITF'] = "CAN"
117 | df_raw_lin['ITF'] = "LIN"
118 |
119 | # Lin set extended to 0
120 | df_raw_lin['IDE'] = 0
121 |
122 | # Merge data frames
123 | df_raw_chunk = pd.concat([df_raw_can, df_raw_lin])
124 |
125 | # Keep only selected time interval (files may contain a bit more at both ends)
126 | df_raw_chunk = df_raw_chunk.loc[start_date:stop_date]
127 |
128 | # Append
129 | df_raw = pd.concat([df_raw, df_raw_chunk])
130 |
131 | # Max data points reached?
132 | if len(df_raw) >= max_data_points:
133 | break
134 |
135 | # Any data in time interval?
136 | if len(df_raw) == 0:
137 | return None
138 |
139 | # Reset the index to get the timestamp as column
140 | df_raw.reset_index(inplace=True)
141 |
142 | # Keep only selected columns
143 | df_raw = df_raw[["TimeStamp", 'ITF', 'BusChannel', 'ID', 'IDE', 'DataLength', 'DataBytes']]
144 |
145 | # Rename columns
146 | df_raw.rename(columns={"TimeStamp": "TIME", "BusChannel": "CHN", "DataLength": "NOB", 'DataBytes': "DATA"},
147 | inplace=True)
148 |
149 | # Cut to max data points
150 | df_raw = df_raw[0:max_data_points]
151 |
152 | # Change from datetime to epoch
153 | df_raw['TIME'] = df_raw['TIME'].astype(np.int64) / 10 ** 6
154 |
155 | # Change "Databytes" from array to hex string
156 | df_raw['DATA'] = df_raw['DATA'].apply(lambda data: ' '.join('{:02X}'.format(x) for x in data))
157 |
158 | # Column names and types
159 | columns = [{"text": x, "type": "time" if x == "TIME" else "string"} for x in list(df_raw.columns)]
160 |
161 | # Return formatted output
162 | return [{"type": "table", "columns": columns, "rows": df_raw.values.tolist()}]
163 |
164 |
165 | def time_series_phy_data(fs, signal_queries: [SignalQuery], start_date: datetime, stop_date: datetime, limit_mb,
166 | passwords, tp_type) -> dict:
167 | """
168 | Returns time series based on a list of signal queries.
169 |
170 | The function is optimized towards:
171 | - Low memory usage (only one file loaded at a time).
172 | - As few decoding runs as possible (time expensive)
173 |
174 | For each device, a log file is only loaded once. To obtain this, the signal requests are first grouped by device ID.
175 |
176 | The decoder takes one db and assumes that it should be applied to all entries (regardless of channel and interface).
177 | As a result, the decoder needs to be run for each combination of db, channel and interface.
178 | Signals from the same channel and interface can be grouped to process all in one run (applied after device grouping)
179 |
180 | Returns as a list of dicts. Each dict contains the signal "target" name and data points as a list of value (float/str)
181 | and timestamp (float) tuples.
182 |
183 | e.g.
184 | [
185 | {'target': 'A', 'datapoints': [(0.1, 1603895728164.1)]},
186 | {'target': 'B', 'datapoints': [(1.1, 1603895728164.1), (1.2, 1603895729164.15)]},
187 | {'target': 'C', 'datapoints': [(3.1, 1603895728164.1), (3.2, 1603895729164.15), (3.3, 1603895729164.24)]}
188 | ]
189 | """
190 |
191 |     # Init the response to make sure that we respond to all targets, even if they have no data points
192 | result = [{'refId': x.refid, 'target': x.target, 'datapoints': []} for x in signal_queries]
193 |
194 |     # Keep track of how much data has been processed (in MB)
195 | data_processed_mb = 0
196 |
197 |     # Group the signal queries by device, such that files from the same device need to be loaded only once
198 | for device, device_group in groupby(signal_queries, lambda x: x.device):
199 |
200 | device_group = list(device_group)
201 |
202 | # Find log files
203 | log_files = canedge_browser.get_log_files(fs, device, start_date=start_date, stop_date=stop_date,
204 | passwords=passwords)
205 |
206 | # Load log files one at a time (to reduce memory usage)
207 | session_previous = None
208 | for log_file in log_files:
209 |
210 | # Check if log file is in new session
211 | _, session_current, _, _ = fs.path_to_pars(log_file)
212 | new_session = False
213 | if session_previous is not None and session_previous != session_current:
214 | new_session = True
215 | session_previous = session_current
216 |
217 | # Get size of file
218 | file_size_mb = fs.stat(log_file)["size"] >> 20
219 |
220 | # Check if we have reached the limit of data processed in MB
221 | if data_processed_mb + file_size_mb > limit_mb:
222 | logger.info(f"File: {log_file} - Skipping (limit {limit_mb} MB)")
223 | continue
224 | logger.info(f"File: {log_file}")
225 |
226 | # Update size of data processed
227 | data_processed_mb += file_size_mb
228 |
229 |             # TODO: caching could be improved by keying on log file and signal - such that the cached data contains the
230 |             # decoded signals (at max time resolution). Currently, the cache contains all raw data, which does not improve
231 |             # loading times significantly (and takes a lot of memory)
232 | start_epoch, df_raw_can, df_raw_lin = _load_log_file(fs, log_file, [x.itf for x in device_group], passwords)
233 |
234 |             # Keep only selected time interval (files may contain a bit more at both ends). Do this early to process as
235 | # little data as possible
236 | df_raw_can = df_raw_can.loc[start_date:stop_date]
237 | df_raw_lin = df_raw_lin.loc[start_date:stop_date]
238 |
239 | # If no data, continue to next file
240 | if len(df_raw_can) == 0 and len(df_raw_lin) == 0:
241 | continue
242 |
243 | # Group queries using the same db, interface and channel (to minimize the number of decoding runs)
244 | for (itf, chn, db), decode_group in groupby(device_group, lambda x: (x.itf, x.chn, x.db)):
245 |
246 | decode_group = list(decode_group)
247 |
248 | # Keep only selected interface (only signals from the same interface are grouped)
249 | df_raw = df_raw_can if itf == CanedgeInterface.CAN else df_raw_lin
250 |
251 | # Keep only selected channel
252 | df_raw = df_raw.loc[df_raw['BusChannel'] == int(chn)]
253 |
254 | if df_raw.empty:
255 | continue
256 |
257 |                 # If IDE is missing (LIN), add a dummy column to allow decoding
258 | if 'IDE' not in df_raw:
259 | df_raw['IDE'] = 0
260 |
261 |                 # Before the costly decoding step, filter out IDs not present in the DB (with bit 32 cleared). For
262 |                 # simplicity, standard and extended IDs are not differentiated, so overlapping IDs may still pass unused frames
263 | if db.protocol == "J1939":
264 | #TODO Find out how to do pre-filtering on PGNs to speed up J1939 decoding
265 | pass
266 | else:
267 | df_raw = df_raw[df_raw['ID'].isin([x & 0x7FFFFFFF for x in db.frames.keys()])]
268 | # TODO optimize by using requested signals to create a smaller subset DBC and filter by that
269 |
270 | if tp_type != "":
271 | # Decode after first re-segmenting CAN data according to TP type (uds, j1939, nmea)
272 | #TODO Optimize for speed
273 | tp = MultiFrameDecoder(tp_type)
274 | df_raw = tp.combine_tp_frames(df_raw)
275 |
276 | df_phys_temp = []
277 | for length, group in df_raw.groupby("DataLength"):
278 | df_phys_group = can_decoder.DataFrameDecoder(db).decode_frame(group)
279 |
280 | if 'Signal' not in df_phys_group.columns:
281 | continue
282 |
283 | df_phys_temp.append(df_phys_group)
284 |
285 | if len(df_phys_temp) > 0:
286 |                     df_phys = pd.concat(df_phys_temp, ignore_index=False).sort_index()
287 | else:
288 | df_phys = pd.DataFrame()
289 |
290 |                 # Commented out: the original "clean" single-pass decoding, due to lack of support for mixed-DLC rows in df_raw
291 | # df_phys = can_decoder.DataFrameDecoder(db).decode_frame(df_raw)
292 |
293 |
294 | # Check if output contains any signals
295 | if 'Signal' not in df_phys.columns:
296 | continue
297 |
298 | # Keep only requested signals
299 | df_phys = df_phys[df_phys['Signal'].isin([x.signal_name for x in decode_group])]
300 |
301 | # Drop unused columns
302 | df_phys.drop(['CAN ID', 'Raw Value'], axis=1, inplace=True)
303 |
304 | # Resample each signal using the specific method and interval.
305 | # Making sure that only existing/real data points are included in the output (no interpolations etc).
306 | for signal_group in decode_group:
307 |
308 |                     # "Back up" the original timestamps so they can be restored after resampling
309 | df_phys['time_orig'] = df_phys.index
310 |
311 | # Extract the signal
312 | df_phys_signal = df_phys.loc[df_phys['Signal'] == signal_group.signal_name]
313 |
314 | # Pick the min, max or nearest. This picks a real data value but potentially a "fake" timestamp
315 | interval_ms = signal_group.interval_ms
316 | if signal_group.method == SampleMethod.MIN:
317 | df_phys_signal_resample = df_phys_signal.resample(f"{interval_ms}ms").min()
318 | elif signal_group.method == SampleMethod.MAX:
319 | df_phys_signal_resample = df_phys_signal.resample(f"{interval_ms}ms").max()
320 | else:
321 | df_phys_signal_resample = df_phys_signal.resample(f"{interval_ms}ms").nearest()
322 |
323 | # The "original" time was also resampled. Use this to restore true data points.
324 | # Drop duplicates and nans (duplicates were potentially created during "nearest" resampling)
325 | # This also makes sure that data is never up-sampled
326 | df_phys_signal_resample.drop_duplicates(subset='time_orig', inplace=True)
327 | df_phys_signal_resample.dropna(axis=0, how='any', inplace=True)
328 |
329 | # Timestamps and values to list
330 | timestamps = (df_phys_signal_resample["time_orig"].astype(np.int64) / 10 ** 6).tolist()
331 | values = df_phys_signal_resample["Physical Value"].values.tolist()
332 |
333 | # Get the list index of the result to update
334 | result_index = [idx for idx, value in enumerate(result) if value['target'] == signal_group.target][0]
335 |
336 | # If new session, insert a None/null data point to indicate that data is not continuous
337 | if new_session:
338 | result[result_index]["datapoints"].extend([[None, None]])
339 |
340 | # Update result with additional datapoints
341 | result[result_index]["datapoints"].extend(list(zip(values, timestamps)))
342 |
343 | return result
344 |
345 |
346 | def _load_log_file(fs, file, itf_used, passwords):
347 |
348 |     # Defined as a local function to be able to cache the result
349 | @cache.memoize(timeout=50)
350 | def _load_log_file_cache(file_in, itf_used_in, passwords_in):
351 | with fs.open(file_in, "rb") as handle:
352 | mdf_file = mdf_iter.MdfFile(handle, passwords=passwords_in)
353 |
354 | # Get log file start time
355 | start_epoch = datetime.utcfromtimestamp(mdf_file.get_first_measurement() / 1000000000)
356 |
357 | # Load only the interfaces which are used
358 | df_raw_can_local = mdf_file.get_data_frame() if CanedgeInterface.CAN in itf_used_in else pd.DataFrame()
359 | df_raw_lin_local = mdf_file.get_data_frame_lin() if CanedgeInterface.LIN in itf_used_in else pd.DataFrame()
360 |
361 | return start_epoch, df_raw_can_local, df_raw_lin_local
362 |
363 | return _load_log_file_cache(file, itf_used, passwords)
364 |
--------------------------------------------------------------------------------
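
A minimal standalone sketch of the query grouping described in the time_series_phy_data docstring above. The Query namedtuple is a hypothetical stand-in for the real SignalQuery; note that itertools.groupby only merges consecutive items, so the queries are sorted by device first.

```python
from collections import namedtuple
from itertools import groupby

# Hypothetical stand-in for SignalQuery (illustration only)
Query = namedtuple("Query", ["device", "itf", "chn", "db", "signal_name"])

queries = [
    Query("2F6913DB", "CAN", 2, "canmod-gps", "Speed"),
    Query("2F6913DB", "CAN", 2, "canmod-gps", "Satellites"),
    Query("AABBCCDD", "CAN", 1, "obd", "S1_PID_0D_VehicleSpeed"),
]

# Group by device so each device's log files are loaded only once ...
for device, device_group in groupby(sorted(queries, key=lambda q: q.device), lambda q: q.device):
    device_group = list(device_group)
    # ... then group by (itf, chn, db) to minimize the number of decoding runs per file
    for (itf, chn, db), decode_group in groupby(device_group, lambda q: (q.itf, q.chn, q.db)):
        print(device, itf, chn, db, [q.signal_name for q in decode_group])
```
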
/canedge_datasource/time_range.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | import pytz
3 |
4 |
5 | def parse_time_range(start_date_str: str, stop_date_str: str) -> (datetime, datetime):
6 | """
7 | From / to times are provided in ISO8601 format without timezone information (naive). The timezone, however,
8 | is always UTC. By updating the timezone information the datetime objects become timezone aware.
9 | """
10 | start_date = datetime.strptime(start_date_str, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=pytz.UTC)
11 | stop_date = datetime.strptime(stop_date_str, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=pytz.UTC)
12 |
13 | return start_date, stop_date
14 |
--------------------------------------------------------------------------------
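
Example usage of parse_time_range, assuming the package is importable (e.g. when run from the repository root). The input strings match the naive ISO8601 format Grafana sends:

```python
from canedge_datasource.time_range import parse_time_range

start, stop = parse_time_range("2022-01-08T10:33:37.186Z", "2022-01-08T10:51:16.573Z")
print(start.isoformat())               # 2022-01-08T10:33:37.186000+00:00 (timezone-aware UTC)
print((stop - start).total_seconds())  # 1059.387
```
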
/canedge_datasource_cli.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import sys
4 | import click
5 | from pathlib import Path
6 | from canedge_datasource import start_server
7 | from canedge_datasource.CanedgeFileSystem import CanedgeFileSystem
8 | from urllib.parse import urlparse
9 | from urllib.request import url2pathname
10 |
11 |
12 | @click.command()
13 | @click.argument('data_url', envvar='CANEDGE_DATA_URL')
14 | @click.option('--port', required=False, default=5000, type=int, help='The port of the datasource server')
15 | @click.option('--limit', required=False, default=100, type=int, help='Limit on data to process in MB')
16 | @click.option('--s3_ak', required=False, envvar='CANEDGE_S3_AK', type=str, help='S3 access key')
17 | @click.option('--s3_sk', required=False, envvar='CANEDGE_S3_SK', type=str, help='S3 secret key')
18 | @click.option('--s3_bucket', required=False, envvar='CANEDGE_S3_BUCKET', type=str, help='S3 bucket name')
19 | @click.option('--s3_cert', required=False, envvar='CANEDGE_S3_CERT', type=click.Path(), help='S3 cert path')
20 | @click.option('--loglevel', required=False, default="INFO",
21 | type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]), help='Logging level')
22 | @click.option('--tp_type', required=False, default="", type=str, help='ISO TP type (uds, j1939, nmea)')
23 |
24 | def main(data_url, port, limit, s3_ak, s3_sk, s3_bucket, s3_cert, loglevel, tp_type):
25 | """
26 | CANedge Grafana Datasource. Provide a URL pointing to a CANedge data root.
27 |
28 |     Optionally place decoding rule file(s) (*.dbc) and a passwords file (passwords.json) in the data source root.
29 |
30 | Example of passwords.json content:
31 |
32 | {"AABBCCDD": "MySecret22BczPassword1234@482", "11223344": "MyOtherSecretPassword512312zZ"}
33 |
34 | Examples
35 |
36 | Scheme: file (local file system)
37 |
38 | OS: Windows
39 |
40 | file:///c:/data/
41 |
42 | file:///c:/Users/bob/Documents/data
43 |
44 | file:///f:/
45 |
46 |     OS: Linux
47 |
48 | file:///home/data/
49 |
50 | Scheme: HTTP (S3):
51 |
52 | http://s3.eu-central-1.amazonaws.com
53 |
54 | http://192.168.0.100
55 |
56 | http://192.168.0.100:5000
57 |
58 | Scheme: HTTPS (S3):
59 |
60 | https://s3.eu-central-1.amazonaws.com
61 |
62 | https://192.168.0.100
63 |
64 | https://192.168.0.100:5000
65 | """
66 |
67 | # Set log level
68 | loglevel_number = getattr(logging, loglevel.upper())
69 | logging.basicConfig(level=loglevel_number, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
70 |
71 | # Set up file system
72 | print(f"Mount path: {data_url}")
73 | url = urlparse(data_url)
74 |
75 | # Local file system
76 | if url.scheme == "file" and url.path != "":
77 | fs = CanedgeFileSystem(protocol="file", base_path=url2pathname(url.path))
78 |
79 | # S3
80 | elif url.scheme in ["http", "https"] and url.path == "":
81 | if s3_ak is None or s3_sk is None or s3_bucket is None:
82 | sys.exit("Missing S3 information")
83 |
84 | # Server args
85 | args = {"endpoint_url": data_url}
86 |
87 | # If provided, add cert path
88 | if s3_cert is not None:
89 | s3_cert_path = Path(s3_cert)
90 |
91 |             # Check if path exists
92 | if not s3_cert_path.is_file():
93 | logging.error(f"Cert not found: {s3_cert}")
94 | sys.exit(-1)
95 |
96 | args["verify"] = s3_cert_path
97 |
98 | fs = CanedgeFileSystem(protocol="s3", base_path=s3_bucket, key=s3_ak, secret=s3_sk, client_kwargs=args, use_listings_cache=False)
99 | else:
100 | logging.error(f"Unsupported data URL: {data_url}")
101 | sys.exit(-1)
102 |
103 | # Load DBs in root
104 | logging.getLogger("canmatrix").setLevel(logging.ERROR)
105 | import can_decoder
106 | dbs = {}
107 | for db_path in fs.glob("*.dbc"):
108 | db_name = Path(db_path).stem.lower()
109 | with fs.open(db_path) as fp:
110 | db = can_decoder.load_dbc(fp)
111 | dbs[db_name] = {"db": db, "signals": db.signals()}
112 |
113 | print(f"Loaded DBs: {', '.join(dbs.keys())}")
114 |
115 | # Load passwords file if exists
116 | passwords = {}
117 | if fs.isfile("passwords.json"):
118 | try:
119 | with fs.open("passwords.json") as fp:
120 | passwords = json.load(fp)
121 | print("Loaded passwords file")
122 | except Exception as e:
123 |             logging.error(f"Unable to load passwords file: {e}")
124 | sys.exit(-1)
125 |
126 | start_server(fs, dbs, passwords, port, limit, tp_type)
127 |
128 | if __name__ == '__main__':
129 | main()
130 |
--------------------------------------------------------------------------------
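
A minimal sketch of how main() above distinguishes local and S3 data URLs, using only the standard-library calls it already relies on (the URLs are illustrative):

```python
from urllib.parse import urlparse
from urllib.request import url2pathname

for data_url in ["file:///c:/data/", "file:///home/data/", "https://s3.eu-central-1.amazonaws.com"]:
    url = urlparse(data_url)
    if url.scheme == "file" and url.path != "":
        # Local file system: the URL path maps to an OS path
        print("Local path: ", url2pathname(url.path))
    elif url.scheme in ["http", "https"] and url.path == "":
        # S3: the URL is used directly as the endpoint
        print("S3 endpoint:", data_url)
```
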
/canedge_grafana_backend.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=CANedge Grafana Backend service v2
3 | After=network.target
4 | StartLimitIntervalSec=0
5 |
6 | [Service]
7 | Type=simple
8 | Restart=always
9 | RestartSec=1
10 | User=ubuntu
11 | ExecStart=/home/ubuntu/canedge-grafana-backend/env/bin/python3 -u /home/ubuntu/canedge-grafana-backend/canedge_datasource_cli.py file:///home/ubuntu/canedge-grafana-backend/LOG --port 8080
12 |
13 | [Install]
14 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/dashboard_templates/dashboard-template-sample-data.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": "-- Grafana --",
7 | "enable": true,
8 | "hide": true,
9 | "iconColor": "rgba(0, 211, 255, 1)",
10 | "name": "Annotations & Alerts",
11 | "target": {
12 | "limit": 100,
13 | "matchAny": false,
14 | "tags": [],
15 | "type": "dashboard"
16 | },
17 | "type": "dashboard"
18 | },
19 | {
20 | "enable": true,
21 | "iconColor": "dark-orange",
22 | "name": "Session",
23 | "query": "{\"annotation\": \"session\", \"device\": \"${DEVICE}\"}"
24 | },
25 | {
26 | "enable": true,
27 | "iconColor": "#ffce6c",
28 | "name": "Split",
29 | "query": "{\"annotation\": \"split\", \"device\": \"${DEVICE}\"}"
30 | }
31 | ]
32 | },
33 | "description": "",
34 | "editable": true,
35 | "fiscalYearStartMonth": 0,
36 | "graphTooltip": 0,
37 | "id": 11,
38 | "iteration": 1646914653824,
39 | "links": [],
40 | "liveNow": false,
41 | "panels": [
42 | {
43 | "aliasColors": {},
44 | "bars": false,
45 | "dashLength": 10,
46 | "dashes": false,
47 | "description": "",
48 | "fill": 1,
49 | "fillGradient": 0,
50 | "gridPos": {
51 | "h": 12,
52 | "w": 15,
53 | "x": 0,
54 | "y": 0
55 | },
56 | "hiddenSeries": false,
57 | "id": 4,
58 | "legend": {
59 | "avg": false,
60 | "current": false,
61 | "max": false,
62 | "min": false,
63 | "show": true,
64 | "total": false,
65 | "values": false
66 | },
67 | "lines": true,
68 | "linewidth": 1,
69 | "maxDataPoints": 400,
70 | "nullPointMode": "null",
71 | "options": {
72 | "alertThreshold": true
73 | },
74 | "percentage": false,
75 | "pluginVersion": "8.4.3",
76 | "pointradius": 0.5,
77 | "points": true,
78 | "renderer": "flot",
79 | "seriesOverrides": [],
80 | "spaceLength": 10,
81 | "stack": false,
82 | "steppedLine": false,
83 | "targets": [
84 | {
85 | "datasource": {
86 | "type": "grafana-simple-json-datasource"
87 | },
88 | "hide": false,
89 | "refId": "Variables",
90 | "target": "{\"device\":\"${DEVICE}\",\"itf\":\"${ITF}\",\"chn\":\"${CHN}\",\"db\":\"${DB}\",\"signal\":\"${SIGNAL}\"}",
91 | "type": "timeserie"
92 | },
93 | {
94 | "datasource": {
95 | "type": "grafana-simple-json-datasource"
96 | },
97 | "hide": false,
98 | "refId": "Position",
99 | "target": "{\"device\":\"${DEVICE}\",\"itf\":\"CAN\",\"chn\":\"CH2\",\"db\":\"canmod-gps\",\"signal\":\"(Latitude|Longitude)\"}",
100 | "type": "timeserie"
101 | },
102 | {
103 | "datasource": {
104 | "type": "grafana-simple-json-datasource"
105 | },
106 | "hide": false,
107 | "refId": "Speed",
108 | "target": "{\"device\":\"${DEVICE}\",\"itf\":\"CAN\",\"chn\":\"CH2\",\"db\":\"canmod-gps\",\"signal\":\"(Speed)\"}",
109 | "type": "timeserie"
110 | },
111 | {
112 | "datasource": {
113 | "type": "grafana-simple-json-datasource"
114 | },
115 | "hide": false,
116 | "refId": "DistanceTrip",
117 | "target": "{\"device\":\"${DEVICE}\",\"itf\":\"CAN\",\"chn\":\"CH2\",\"db\":\"canmod-gps\",\"signal\":\"(DistanceTrip|DistanceAccuracy)\"}",
118 | "type": "timeserie"
119 | },
120 | {
121 | "datasource": {
122 | "type": "grafana-simple-json-datasource"
123 | },
124 | "hide": false,
125 | "refId": "Attitude",
126 | "target": "{\"device\":\"${DEVICE}\",\"itf\":\"CAN\",\"chn\":\"CH2\",\"db\":\"canmod-gps\",\"signal\":\"(Roll|Pitch|Heading)\"}",
127 | "type": "timeserie"
128 | },
129 | {
130 | "datasource": {
131 | "type": "grafana-simple-json-datasource"
132 | },
133 | "hide": false,
134 |           "refId": "Satellites",
135 | "target": "{\"device\":\"${DEVICE}\",\"itf\":\"CAN\",\"chn\":\"CH2\",\"db\":\"canmod-gps\",\"signal\":\"Satellites\"}",
136 | "type": "timeserie"
137 | },
138 | {
139 | "datasource": {
140 | "type": "grafana-simple-json-datasource"
141 | },
142 | "hide": false,
143 | "refId": "DistanceTotal",
144 | "target": "{\"device\":\"${DEVICE}\",\"itf\":\"CAN\",\"chn\":\"CH2\",\"db\":\"canmod-gps\",\"signal\":\"DistanceTotal\"}",
145 | "type": "timeserie"
146 | },
147 | {
148 | "datasource": {
149 | "type": "grafana-simple-json-datasource"
150 | },
151 | "hide": false,
152 | "refId": "GeoFences",
153 | "target": "{\"device\":\"${DEVICE}\",\"itf\":\"CAN\",\"chn\":\"CH2\",\"db\":\"canmod-gps\",\"signal\":\"(Fence1|Fence2|Fence3|Fence4)\"}",
154 | "type": "timeserie"
155 | }
156 | ],
157 | "thresholds": [],
158 | "timeRegions": [],
159 | "title": "${SIGNAL}",
160 | "tooltip": {
161 | "shared": true,
162 | "sort": 0,
163 | "value_type": "individual"
164 | },
165 | "transformations": [
166 | {
167 | "id": "filterByRefId",
168 | "options": {
169 | "include": "Variables"
170 | }
171 | }
172 | ],
173 | "type": "graph",
174 | "xaxis": {
175 | "mode": "time",
176 | "show": true,
177 | "values": []
178 | },
179 | "yaxes": [
180 | {
181 | "format": "short",
182 | "logBase": 1,
183 | "show": true
184 | },
185 | {
186 | "format": "short",
187 | "logBase": 1,
188 | "show": true
189 | }
190 | ],
191 | "yaxis": {
192 | "align": false
193 | }
194 | },
195 | {
196 | "autoZoom": true,
197 | "datasource": {
198 | "type": "datasource",
199 | "uid": "-- Dashboard --"
200 | },
201 | "defaultLayer": "OpenStreetMap",
202 | "description": "",
203 | "gridPos": {
204 | "h": 12,
205 | "w": 9,
206 | "x": 15,
207 | "y": 0
208 | },
209 | "id": 3,
210 | "lineColor": "#3d85c6",
211 | "maxDataPoints": 400,
212 | "pluginVersion": "8.4.1",
213 | "pointColor": "#FF780A",
214 | "scrollWheelZoom": false,
215 | "showLayerChanger": true,
216 | "targets": [
217 | {
218 | "datasource": {
219 | "type": "datasource",
220 | "uid": "-- Dashboard --"
221 | },
222 | "panelId": 4,
223 | "refId": "A"
224 | }
225 | ],
226 | "title": "GPS Position",
227 | "transformations": [
228 | {
229 | "id": "filterByRefId",
230 | "options": {
231 | "include": "Position"
232 | }
233 | }
234 | ],
235 | "type": "pr0ps-trackmap-panel"
236 | },
237 | {
238 | "aliasColors": {
239 | "2F6913DB:CAN:CH2:canmod-gps:Speed": "#3d85c6"
240 | },
241 | "bars": false,
242 | "dashLength": 10,
243 | "dashes": false,
244 | "datasource": {
245 | "type": "datasource",
246 | "uid": "-- Dashboard --"
247 | },
248 | "description": "",
249 | "fieldConfig": {
250 | "defaults": {
251 | "unit": "none"
252 | },
253 | "overrides": []
254 | },
255 | "fill": 1,
256 | "fillGradient": 0,
257 | "gridPos": {
258 | "h": 6,
259 | "w": 5,
260 | "x": 0,
261 | "y": 12
262 | },
263 | "hiddenSeries": false,
264 | "id": 5,
265 | "legend": {
266 | "avg": false,
267 | "current": false,
268 | "max": false,
269 | "min": false,
270 | "show": true,
271 | "total": false,
272 | "values": false
273 | },
274 | "lines": true,
275 | "linewidth": 1,
276 | "maxDataPoints": 400,
277 | "nullPointMode": "null",
278 | "options": {
279 | "alertThreshold": true
280 | },
281 | "percentage": false,
282 | "pluginVersion": "8.4.3",
283 | "pointradius": 2,
284 | "points": false,
285 | "renderer": "flot",
286 | "seriesOverrides": [
287 | {
288 | "alias": "2F6913DB:CAN:CH2:canmod-gps:SpeedAccuracy",
289 | "yaxis": 2
290 | }
291 | ],
292 | "spaceLength": 10,
293 | "stack": false,
294 | "steppedLine": false,
295 | "targets": [
296 | {
297 | "datasource": {
298 | "type": "datasource",
299 | "uid": "-- Dashboard --"
300 | },
301 | "panelId": 4,
302 | "refId": "A"
303 | }
304 | ],
305 | "thresholds": [],
306 | "timeRegions": [],
307 | "title": "Speed (m/s)",
308 | "tooltip": {
309 | "shared": true,
310 | "sort": 0,
311 | "value_type": "individual"
312 | },
313 | "transformations": [
314 | {
315 | "id": "filterByRefId",
316 | "options": {
317 | "include": "Speed"
318 | }
319 | }
320 | ],
321 | "type": "graph",
322 | "xaxis": {
323 | "mode": "time",
324 | "show": true,
325 | "values": []
326 | },
327 | "yaxes": [
328 | {
329 | "format": "none",
330 | "logBase": 1,
331 | "show": true
332 | },
333 | {
334 | "format": "none",
335 | "logBase": 1,
336 | "show": true
337 | }
338 | ],
339 | "yaxis": {
340 | "align": false
341 | }
342 | },
343 | {
344 | "aliasColors": {
345 | "2F6913DB:CAN:CH2:canmod-gps:Speed": "blue"
346 | },
347 | "bars": false,
348 | "dashLength": 10,
349 | "dashes": false,
350 | "datasource": {
351 | "type": "datasource",
352 | "uid": "-- Dashboard --"
353 | },
354 | "description": "",
355 | "fieldConfig": {
356 | "defaults": {
357 | "unit": "none"
358 | },
359 | "overrides": []
360 | },
361 | "fill": 1,
362 | "fillGradient": 0,
363 | "gridPos": {
364 | "h": 6,
365 | "w": 5,
366 | "x": 5,
367 | "y": 12
368 | },
369 | "hiddenSeries": false,
370 | "id": 6,
371 | "legend": {
372 | "avg": false,
373 | "current": false,
374 | "max": false,
375 | "min": false,
376 | "show": true,
377 | "total": false,
378 | "values": false
379 | },
380 | "lines": true,
381 | "linewidth": 1,
382 | "maxDataPoints": 400,
383 | "nullPointMode": "null",
384 | "options": {
385 | "alertThreshold": true
386 | },
387 | "percentage": false,
388 | "pluginVersion": "8.4.3",
389 | "pointradius": 2,
390 | "points": false,
391 | "renderer": "flot",
392 | "seriesOverrides": [
393 | {
394 | "alias": "2F6913DB:CAN:CH2:canmod-gps:SpeedAccuracy",
395 | "yaxis": 2
396 | },
397 | {
398 | "alias": "2F6913DB:CAN:CH2:canmod-gps:DistanceAccuracy",
399 | "yaxis": 2
400 | }
401 | ],
402 | "spaceLength": 10,
403 | "stack": false,
404 | "steppedLine": false,
405 | "targets": [
406 | {
407 | "datasource": {
408 | "type": "datasource",
409 | "uid": "-- Dashboard --"
410 | },
411 | "panelId": 4,
412 | "refId": "A"
413 | }
414 | ],
415 | "thresholds": [],
416 | "timeRegions": [],
417 | "title": "Trip distance (m)",
418 | "tooltip": {
419 | "shared": true,
420 | "sort": 0,
421 | "value_type": "individual"
422 | },
423 | "transformations": [
424 | {
425 | "id": "filterByRefId",
426 | "options": {
427 | "include": "DistanceTrip"
428 | }
429 | }
430 | ],
431 | "type": "graph",
432 | "xaxis": {
433 | "mode": "time",
434 | "show": true,
435 | "values": []
436 | },
437 | "yaxes": [
438 | {
439 | "format": "none",
440 | "logBase": 1,
441 | "show": true
442 | },
443 | {
444 | "format": "none",
445 | "logBase": 1,
446 | "show": true
447 | }
448 | ],
449 | "yaxis": {
450 | "align": false
451 | }
452 | },
453 | {
454 | "aliasColors": {
455 | "2F6913DB:CAN:CH2:canmod-gps:Speed": "blue"
456 | },
457 | "bars": false,
458 | "dashLength": 10,
459 | "dashes": false,
460 | "datasource": {
461 | "type": "datasource",
462 | "uid": "-- Dashboard --"
463 | },
464 | "description": "",
465 | "fieldConfig": {
466 | "defaults": {
467 | "unit": "none"
468 | },
469 | "overrides": []
470 | },
471 | "fill": 0,
472 | "fillGradient": 0,
473 | "gridPos": {
474 | "h": 6,
475 | "w": 5,
476 | "x": 10,
477 | "y": 12
478 | },
479 | "hiddenSeries": false,
480 | "id": 7,
481 | "legend": {
482 | "avg": false,
483 | "current": false,
484 | "max": false,
485 | "min": false,
486 | "show": true,
487 | "total": false,
488 | "values": false
489 | },
490 | "lines": true,
491 | "linewidth": 1,
492 | "maxDataPoints": 400,
493 | "nullPointMode": "null",
494 | "options": {
495 | "alertThreshold": true
496 | },
497 | "percentage": false,
498 | "pluginVersion": "8.4.3",
499 | "pointradius": 2,
500 | "points": false,
501 | "renderer": "flot",
502 | "seriesOverrides": [
503 | {
504 | "alias": "2F6913DB:CAN:CH2:canmod-gps:SpeedAccuracy",
505 | "yaxis": 2
506 | },
507 | {
508 | "alias": "2F6913DB:CAN:CH2:canmod-gps:DistanceAccuracy",
509 | "yaxis": 2
510 | },
511 | {
512 | "alias": "2F6913DB:CAN:CH2:canmod-gps:Heading",
513 | "yaxis": 2
514 | }
515 | ],
516 | "spaceLength": 10,
517 | "stack": false,
518 | "steppedLine": false,
519 | "targets": [
520 | {
521 | "datasource": {
522 | "type": "datasource",
523 | "uid": "-- Dashboard --"
524 | },
525 | "panelId": 4,
526 | "refId": "A"
527 | }
528 | ],
529 | "thresholds": [],
530 | "timeRegions": [],
531 | "title": "Attitude",
532 | "tooltip": {
533 | "shared": true,
534 | "sort": 0,
535 | "value_type": "individual"
536 | },
537 | "transformations": [
538 | {
539 | "id": "filterByRefId",
540 | "options": {
541 | "include": "Attitude"
542 | }
543 | }
544 | ],
545 | "type": "graph",
546 | "xaxis": {
547 | "mode": "time",
548 | "show": true,
549 | "values": []
550 | },
551 | "yaxes": [
552 | {
553 | "format": "none",
554 | "logBase": 1,
555 | "show": true
556 | },
557 | {
558 | "format": "none",
559 | "logBase": 1,
560 | "show": true
561 | }
562 | ],
563 | "yaxis": {
564 | "align": false
565 | }
566 | },
567 | {
568 | "datasource": {
569 | "type": "datasource",
570 | "uid": "-- Dashboard --"
571 | },
572 | "description": "",
573 | "fieldConfig": {
574 | "defaults": {
575 | "color": {
576 | "fixedColor": "#3d85c6",
577 | "mode": "fixed"
578 | },
579 | "mappings": [],
580 | "thresholds": {
581 | "mode": "absolute",
582 | "steps": [
583 | {
584 | "color": "green",
585 | "value": null
586 | },
587 | {
588 | "color": "red",
589 | "value": 80
590 | }
591 | ]
592 | }
593 | },
594 | "overrides": [
595 | {
596 | "matcher": {
597 | "id": "byName",
598 | "options": "Value"
599 | },
600 | "properties": [
601 | {
602 | "id": "color",
603 | "value": {
604 | "fixedColor": "blue",
605 | "mode": "fixed"
606 | }
607 | }
608 | ]
609 | }
610 | ]
611 | },
612 | "gridPos": {
613 | "h": 6,
614 | "w": 3,
615 | "x": 15,
616 | "y": 12
617 | },
618 | "id": 8,
619 | "maxDataPoints": 400,
620 | "options": {
621 | "colorMode": "background",
622 | "graphMode": "area",
623 | "justifyMode": "auto",
624 | "orientation": "auto",
625 | "reduceOptions": {
626 | "calcs": [
627 | "mean"
628 | ],
629 | "fields": "",
630 | "values": false
631 | },
632 | "textMode": "auto"
633 | },
634 | "pluginVersion": "8.4.3",
635 | "targets": [
636 | {
637 | "datasource": {
638 | "type": "datasource",
639 | "uid": "-- Dashboard --"
640 | },
641 | "panelId": 4,
642 | "refId": "A"
643 | }
644 | ],
645 | "title": "#Satellites (avg)",
646 | "transformations": [
647 | {
648 | "id": "filterByRefId",
649 | "options": {
650 |             "include": "Satellites"
651 | }
652 | }
653 | ],
654 | "type": "stat"
655 | },
656 | {
657 | "datasource": {
658 | "type": "datasource",
659 | "uid": "-- Dashboard --"
660 | },
661 | "description": "",
662 | "fieldConfig": {
663 | "defaults": {
664 | "color": {
665 | "fixedColor": "#3d85c6",
666 | "mode": "fixed"
667 | },
668 | "mappings": [],
669 | "thresholds": {
670 | "mode": "absolute",
671 | "steps": [
672 | {
673 | "color": "green",
674 | "value": null
675 | },
676 | {
677 | "color": "red",
678 | "value": 80
679 | }
680 | ]
681 | }
682 | },
683 | "overrides": [
684 | {
685 | "matcher": {
686 | "id": "byName",
687 | "options": "Value"
688 | },
689 | "properties": [
690 | {
691 | "id": "color",
692 | "value": {
693 | "fixedColor": "blue",
694 | "mode": "fixed"
695 | }
696 | }
697 | ]
698 | }
699 | ]
700 | },
701 | "gridPos": {
702 | "h": 6,
703 | "w": 3,
704 | "x": 18,
705 | "y": 12
706 | },
707 | "id": 9,
708 | "maxDataPoints": 400,
709 | "options": {
710 | "colorMode": "background",
711 | "graphMode": "area",
712 | "justifyMode": "auto",
713 | "orientation": "auto",
714 | "reduceOptions": {
715 | "calcs": [
716 | "lastNotNull"
717 | ],
718 | "fields": "",
719 | "values": false
720 | },
721 | "textMode": "auto"
722 | },
723 | "pluginVersion": "8.4.3",
724 | "targets": [
725 | {
726 | "datasource": {
727 | "type": "datasource",
728 | "uid": "-- Dashboard --"
729 | },
730 | "panelId": 4,
731 | "refId": "A"
732 | }
733 | ],
734 | "title": "Total distance (km)",
735 | "transformations": [
736 | {
737 | "id": "filterByRefId",
738 | "options": {
739 | "include": "DistanceTotal"
740 | }
741 | }
742 | ],
743 | "type": "stat"
744 | },
745 | {
746 | "datasource": {
747 | "type": "datasource",
748 | "uid": "-- Dashboard --"
749 | },
750 | "description": "",
751 | "fieldConfig": {
752 | "defaults": {
753 | "color": {
754 | "mode": "thresholds"
755 | },
756 | "mappings": [],
757 | "max": 1,
758 | "min": 0,
759 | "thresholds": {
760 | "mode": "absolute",
761 | "steps": [
762 | {
763 | "color": "green",
764 | "value": null
765 | },
766 | {
767 | "color": "red",
768 | "value": 80
769 | }
770 | ]
771 | }
772 | },
773 | "overrides": [
774 | {
775 | "matcher": {
776 | "id": "byName",
777 | "options": "Value"
778 | },
779 | "properties": [
780 | {
781 | "id": "color",
782 | "value": {
783 | "fixedColor": "blue",
784 | "mode": "fixed"
785 | }
786 | }
787 | ]
788 | },
789 | {
790 | "matcher": {
791 | "id": "byName",
792 | "options": "2F6913DB:CAN:CH2:canmod-gps:Fence1"
793 | },
794 | "properties": [
795 | {
796 | "id": "displayName",
797 | "value": "GF1"
798 | }
799 | ]
800 | },
801 | {
802 | "matcher": {
803 | "id": "byName",
804 | "options": "2F6913DB:CAN:CH2:canmod-gps:Fence2"
805 | },
806 | "properties": [
807 | {
808 | "id": "displayName",
809 | "value": "GF2"
810 | }
811 | ]
812 | },
813 | {
814 | "matcher": {
815 | "id": "byName",
816 | "options": "2F6913DB:CAN:CH2:canmod-gps:Fence3"
817 | },
818 | "properties": [
819 | {
820 | "id": "displayName",
821 | "value": "GF3"
822 | }
823 | ]
824 | },
825 | {
826 | "matcher": {
827 | "id": "byName",
828 | "options": "2F6913DB:CAN:CH2:canmod-gps:Fence4"
829 | },
830 | "properties": [
831 | {
832 | "id": "displayName",
833 | "value": "GF4"
834 | }
835 | ]
836 | }
837 | ]
838 | },
839 | "gridPos": {
840 | "h": 6,
841 | "w": 3,
842 | "x": 21,
843 | "y": 12
844 | },
845 | "id": 10,
846 | "maxDataPoints": 400,
847 | "options": {
848 | "displayMode": "basic",
849 | "orientation": "vertical",
850 | "reduceOptions": {
851 | "calcs": [
852 | "mean"
853 | ],
854 | "fields": "",
855 | "values": false
856 | },
857 | "showUnfilled": true
858 | },
859 | "pluginVersion": "8.4.3",
860 | "targets": [
861 | {
862 | "datasource": {
863 | "type": "datasource",
864 | "uid": "-- Dashboard --"
865 | },
866 | "panelId": 4,
867 | "refId": "A"
868 | }
869 | ],
870 | "title": "Geofence status",
871 | "transformations": [
872 | {
873 | "id": "filterByRefId",
874 | "options": {
875 | "include": "GeoFences"
876 | }
877 | }
878 | ],
879 | "type": "bargauge"
880 | }
881 | ],
882 | "refresh": false,
883 | "schemaVersion": 35,
884 | "style": "dark",
885 | "tags": [],
886 | "templating": {
887 | "list": [
888 | {
889 | "current": {
890 | "selected": false,
891 | "text": "2F6913DB",
892 | "value": "2F6913DB"
893 | },
894 | "datasource": {
895 | "type": "grafana-simple-json-datasource"
896 | },
897 | "definition": "{\"search\":\"device_name\"}",
898 | "hide": 0,
899 | "includeAll": false,
900 | "label": "DEVICE",
901 | "multi": false,
902 | "name": "DEVICE",
903 | "options": [],
904 | "query": "{\"search\":\"device_name\"}",
905 | "refresh": 1,
906 | "regex": "",
907 | "skipUrlSync": false,
908 | "sort": 1,
909 | "type": "query"
910 | },
911 | {
912 | "current": {
913 | "selected": false,
914 | "text": "canmod-gps",
915 | "value": "canmod-gps"
916 | },
917 | "datasource": {
918 | "type": "grafana-simple-json-datasource"
919 | },
920 | "definition": "{\"search\":\"db\"}",
921 | "hide": 0,
922 | "includeAll": false,
923 | "label": "DB",
924 | "multi": false,
925 | "name": "DB",
926 | "options": [],
927 | "query": "{\"search\":\"db\"}",
928 | "refresh": 1,
929 | "regex": "",
930 | "skipUrlSync": false,
931 | "sort": 0,
932 | "type": "query"
933 | },
934 | {
935 | "current": {
936 | "selected": false,
937 | "text": "CH2",
938 | "value": "CH2"
939 | },
940 | "datasource": {
941 | "type": "grafana-simple-json-datasource"
942 | },
943 | "definition": "{\"search\":\"chn\"}",
944 | "hide": 0,
945 | "includeAll": false,
946 | "label": "CHN",
947 | "multi": false,
948 | "name": "CHN",
949 | "options": [],
950 | "query": "{\"search\":\"chn\"}",
951 | "refresh": 1,
952 | "regex": "",
953 | "skipUrlSync": false,
954 | "sort": 0,
955 | "type": "query"
956 | },
957 | {
958 | "current": {
959 | "selected": false,
960 | "text": "CAN",
961 | "value": "CAN"
962 | },
963 | "datasource": {
964 | "type": "grafana-simple-json-datasource"
965 | },
966 | "definition": "{\"search\":\"itf\"}",
967 | "hide": 0,
968 | "includeAll": false,
969 | "label": "ITF",
970 | "multi": false,
971 | "name": "ITF",
972 | "options": [],
973 | "query": "{\"search\":\"itf\"}",
974 | "refresh": 1,
975 | "regex": "",
976 | "skipUrlSync": false,
977 | "sort": 0,
978 | "type": "query"
979 | },
980 | {
981 | "current": {
982 | "selected": true,
983 | "text": [
984 | "AccelerationX",
985 | "AccelerationY",
986 | "AccelerationZ"
987 | ],
988 | "value": [
989 | "AccelerationX",
990 | "AccelerationY",
991 | "AccelerationZ"
992 | ]
993 | },
994 | "datasource": {
995 | "type": "grafana-simple-json-datasource"
996 | },
997 | "definition": "{\"search\":\"signal\", \"db\": \"${DB}\"}",
998 | "hide": 0,
999 | "includeAll": false,
1000 | "label": "SIGNAL",
1001 | "multi": true,
1002 | "name": "SIGNAL",
1003 | "options": [],
1004 | "query": "{\"search\":\"signal\", \"db\": \"${DB}\"}",
1005 | "refresh": 1,
1006 | "regex": "",
1007 | "skipUrlSync": false,
1008 | "sort": 0,
1009 | "type": "query"
1010 | }
1011 | ]
1012 | },
1013 | "time": {
1014 | "from": "2022-01-08T10:33:37.186Z",
1015 | "to": "2022-01-08T10:51:16.573Z"
1016 | },
1017 | "timepicker": {},
1018 | "timezone": "",
1019 | "title": "CANedge Grafana Backend - Template",
1020 | "uid": "u1TFGpL7z",
1021 | "version": 42,
1022 | "weekStart": ""
1023 | }
--------------------------------------------------------------------------------
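
The panel "target" fields above are themselves small JSON documents. A sketch of how such a target string can be generated programmatically (the field set mirrors the "Position" target in the template; whitespace may differ from the hand-written version):

```python
import json

target = json.dumps({"device": "${DEVICE}", "itf": "CAN", "chn": "CH2",
                     "db": "canmod-gps", "signal": "(Latitude|Longitude)"})
print(target)
```
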
/dashboard_templates/dashboard-template-simple.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": "-- Grafana --",
7 | "enable": true,
8 | "hide": true,
9 | "iconColor": "rgba(0, 211, 255, 1)",
10 | "name": "Annotations & Alerts",
11 | "target": {
12 | "limit": 100,
13 | "matchAny": false,
14 | "tags": [],
15 | "type": "dashboard"
16 | },
17 | "type": "dashboard"
18 | },
19 | {
20 | "enable": true,
21 | "iconColor": "dark-orange",
22 | "name": "Session",
23 | "query": "{\"annotation\": \"session\", \"device\": \"${DEVICE}\"}"
24 | },
25 | {
26 | "enable": true,
27 | "iconColor": "#ffce6c",
28 | "name": "Split",
29 | "query": "{\"annotation\": \"split\", \"device\": \"${DEVICE}\"}"
30 | }
31 | ]
32 | },
33 | "description": "",
34 | "editable": true,
35 | "fiscalYearStartMonth": 0,
36 | "graphTooltip": 0,
37 | "id": 7,
38 | "iteration": 1647610719376,
39 | "links": [],
40 | "liveNow": false,
41 | "panels": [
42 | {
43 | "aliasColors": {},
44 | "bars": false,
45 | "dashLength": 10,
46 | "dashes": false,
47 | "description": "",
48 | "fill": 1,
49 | "fillGradient": 0,
50 | "gridPos": {
51 | "h": 15,
52 | "w": 24,
53 | "x": 0,
54 | "y": 0
55 | },
56 | "hiddenSeries": false,
57 | "id": 4,
58 | "legend": {
59 | "avg": false,
60 | "current": false,
61 | "max": false,
62 | "min": false,
63 | "show": true,
64 | "total": false,
65 | "values": false
66 | },
67 | "lines": true,
68 | "linewidth": 1,
69 | "maxDataPoints": 400,
70 | "nullPointMode": "null",
71 | "options": {
72 | "alertThreshold": true
73 | },
74 | "percentage": false,
75 | "pluginVersion": "8.4.3",
76 | "pointradius": 0.5,
77 | "points": true,
78 | "renderer": "flot",
79 | "seriesOverrides": [],
80 | "spaceLength": 10,
81 | "stack": false,
82 | "steppedLine": false,
83 | "targets": [
84 | {
85 | "datasource": {
86 | "type": "grafana-simple-json-datasource"
87 | },
88 | "hide": false,
89 | "refId": "Variables",
90 | "target": "{\"device\":\"${DEVICE}\",\"itf\":\"${ITF}\",\"chn\":\"${CHN}\",\"db\":\"${DB}\",\"signal\":\"${SIGNAL}\"}",
91 | "type": "timeserie"
92 | }
93 | ],
94 | "thresholds": [],
95 | "timeRegions": [],
96 | "title": "${SIGNAL}",
97 | "tooltip": {
98 | "shared": true,
99 | "sort": 0,
100 | "value_type": "individual"
101 | },
102 | "transformations": [
103 | {
104 | "id": "filterByRefId",
105 | "options": {
106 | "include": "Variables"
107 | }
108 | }
109 | ],
110 | "type": "graph",
111 | "xaxis": {
112 | "mode": "time",
113 | "show": true,
114 | "values": []
115 | },
116 | "yaxes": [
117 | {
118 | "format": "short",
119 | "logBase": 1,
120 | "show": true
121 | },
122 | {
123 | "format": "short",
124 | "logBase": 1,
125 | "show": true
126 | }
127 | ],
128 | "yaxis": {
129 | "align": false
130 | }
131 | }
132 | ],
133 | "refresh": false,
134 | "schemaVersion": 35,
135 | "style": "dark",
136 | "tags": [],
137 | "templating": {
138 | "list": [
139 | {
140 | "current": {
141 | "selected": true
142 | },
143 | "datasource": {
144 | "type": "grafana-simple-json-datasource"
145 | },
146 | "definition": "{\"search\":\"device_name\"}",
147 | "hide": 0,
148 | "includeAll": false,
149 | "label": "DEVICE",
150 | "multi": false,
151 | "name": "DEVICE",
152 | "options": [],
153 | "query": "{\"search\":\"device_name\"}",
154 | "refresh": 1,
155 | "regex": "",
156 | "skipUrlSync": false,
157 | "sort": 1,
158 | "type": "query"
159 | },
160 | {
161 | "current": {
162 | "selected": true
163 | },
164 | "datasource": {
165 | "type": "grafana-simple-json-datasource"
166 | },
167 | "definition": "{\"search\":\"db\"}",
168 | "hide": 0,
169 | "includeAll": false,
170 | "label": "DB",
171 | "multi": false,
172 | "name": "DB",
173 | "options": [],
174 | "query": "{\"search\":\"db\"}",
175 | "refresh": 1,
176 | "regex": "",
177 | "skipUrlSync": false,
178 | "sort": 0,
179 | "type": "query"
180 | },
181 | {
182 | "current": {
183 | "selected": true,
184 | "text": "CH1",
185 | "value": "CH1"
186 | },
187 | "datasource": {
188 | "type": "grafana-simple-json-datasource"
189 | },
190 | "definition": "{\"search\":\"chn\"}",
191 | "hide": 0,
192 | "includeAll": false,
193 | "label": "CHN",
194 | "multi": false,
195 | "name": "CHN",
196 | "options": [],
197 | "query": "{\"search\":\"chn\"}",
198 | "refresh": 1,
199 | "regex": "",
200 | "skipUrlSync": false,
201 | "sort": 0,
202 | "type": "query"
203 | },
204 | {
205 | "current": {
206 | "selected": false,
207 | "text": "CAN",
208 | "value": "CAN"
209 | },
210 | "datasource": {
211 | "type": "grafana-simple-json-datasource"
212 | },
213 | "definition": "{\"search\":\"itf\"}",
214 | "hide": 0,
215 | "includeAll": false,
216 | "label": "ITF",
217 | "multi": false,
218 | "name": "ITF",
219 | "options": [],
220 | "query": "{\"search\":\"itf\"}",
221 | "refresh": 1,
222 | "regex": "",
223 | "skipUrlSync": false,
224 | "sort": 0,
225 | "type": "query"
226 | },
227 | {
228 | "current": {
229 | "selected": true
230 | },
231 | "datasource": {
232 | "type": "grafana-simple-json-datasource"
233 | },
234 | "definition": "{\"search\":\"signal\", \"db\": \"${DB}\"}",
235 | "hide": 0,
236 | "includeAll": false,
237 | "label": "SIGNAL",
238 | "multi": true,
239 | "name": "SIGNAL",
240 | "options": [],
241 | "query": "{\"search\":\"signal\", \"db\": \"${DB}\"}",
242 | "refresh": 1,
243 | "regex": "",
244 | "skipUrlSync": false,
245 | "sort": 0,
246 | "type": "query"
247 | }
248 | ]
249 | },
250 | "time": {
251 | "from": "now-12h",
252 | "to": "now"
253 | },
254 | "timepicker": {},
255 | "timezone": "",
256 | "title": "CANedge Grafana Backend - Template (Simple)",
257 | "uid": "u1TFGpL7a",
258 | "version": 1,
259 | "weekStart": ""
260 | }
--------------------------------------------------------------------------------
/install.bat:
--------------------------------------------------------------------------------
1 | python -m venv env & env\Scripts\activate & pip install -r requirements.txt
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiobotocore==2.4.0
2 | aiohttp==3.8.1
3 | aioitertools==0.10.0
4 | aiosignal==1.2.0
5 | async-timeout==4.0.2
6 | asynctest==0.13.0
7 | attrs==21.4.0
8 | botocore==1.27.59
9 | can-decoder>=0.1.9
10 | canedge-browser>=0.0.8
11 | canmatrix==0.9.5
12 | charset-normalizer==2.0.12
13 | click==8.1.3
14 | colorama==0.4.4
15 | Flask==2.1.2
16 | Flask-Caching==1.10.1
17 | frozenlist==1.3.0
18 | fsspec==2022.8.2
19 | future==0.18.2
20 | idna==3.3
21 | importlib-metadata==4.11.3
22 | itsdangerous==2.1.2
23 | Jinja2==3.1.2
24 | jmespath==1.0.0
25 | MarkupSafe==2.1.1
26 | mdf-iter>=2.1.1
27 | multidict==6.0.2
28 | numpy==1.23.2
29 | pandas==1.4.3
30 | python-dateutil==2.8.2
31 | pytz==2022.1
32 | s3fs==2022.8.2
33 | six==1.16.0
34 | typing-extensions==4.2.0
35 | urllib3==1.26.9
36 | waitress==2.1.1
37 | Werkzeug==2.1.2
38 | wrapt==1.14.1
39 | yarl==1.7.2
40 | zipp==3.8.0
41 |
--------------------------------------------------------------------------------
/run_local.bat:
--------------------------------------------------------------------------------
1 | env\Scripts\activate & python canedge_datasource_cli.py "file:///%cd%/LOG" --port 8080 --limit 100
--------------------------------------------------------------------------------
/run_s3.bat:
--------------------------------------------------------------------------------
1 | env\Scripts\activate & python canedge_datasource_cli.py endpoint --port 8080 --limit 100 --s3_ak access_key --s3_sk secret_key --s3_bucket bucket
--------------------------------------------------------------------------------
/test/test_canedge_time_series.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | import can_decoder
3 | import canedge_browser
4 | import pytest
5 | import pytz
6 |
7 | from canedge_datasource.enums import CanedgeInterface, CanedgeChannel, SampleMethod
8 | from canedge_datasource.query import get_time_series, SignalQuery
9 |
10 |
11 | class TestCanedgeTimeSeries(object):
12 |
13 | @pytest.fixture
14 | def fs(self):
15 | fs = canedge_browser.LocalFileSystem(base_path="root/")
16 | return fs
17 |
18 | def test_time_series(self, fs):
19 |
20 | obd = can_decoder.load_dbc("root/obd.dbc")
21 |
22 | signal_name = "S1_PID_0D_VehicleSpeed"
23 | start_date = datetime(year=2020, month=8, day=4, hour=10).replace(tzinfo=pytz.UTC)
24 | stop_date = datetime(year=2022, month=9, day=9, hour=10).replace(tzinfo=pytz.UTC)
25 |
26 | sampling_ms = 1000
27 |
28 | signal_queries = []
29 |
30 | signal_queries.append(SignalQuery(target="A",
31 | device="3BA199E2",
32 | itf=CanedgeInterface.CAN,
33 | chn=CanedgeChannel.CH1,
34 | db=obd,
35 | signal_name="S1_PID_0C_EngineRPM",
36 | interval_ms=sampling_ms,
37 | method=SampleMethod.NEAREST))
38 |
39 | signal_queries.append(SignalQuery(target="B",
40 | device="3BA199E2",
41 | itf=CanedgeInterface.CAN,
42 | chn=CanedgeChannel.CH1,
43 | db=obd,
44 | signal_name="S1_PID_0D_VehicleSpeed",
45 | interval_ms=sampling_ms,
46 | method=SampleMethod.NEAREST))
47 |
48 | signal_queries.append(SignalQuery(target="C",
49 | device="3BA199E2",
50 | itf=CanedgeInterface.CAN,
51 | chn=CanedgeChannel.CH2,
52 | db=obd,
53 | signal_name="S1_PID_0D_VehicleSpeed",
54 | interval_ms=sampling_ms,
55 | method=SampleMethod.NEAREST))
56 |
57 | signal_queries.append(SignalQuery(target="D",
58 | device="3BA199E2",
59 | itf=CanedgeInterface.CAN,
60 | chn=CanedgeChannel.CH2,
61 | db=obd,
62 | signal_name="S1_PID_0D_VehicleSpeed",
63 | interval_ms=sampling_ms,
64 | method=SampleMethod.NEAREST))
65 |
66 | signal_queries.append(SignalQuery(target="E",
67 | device="3BA199E2",
68 | itf=CanedgeInterface.LIN,
69 | chn=CanedgeChannel.CH1,
70 | db=obd,
71 | signal_name="S1_PID_0D_VehicleSpeed",
72 | interval_ms=sampling_ms,
73 | method=SampleMethod.NEAREST))
74 |
75 | signal_queries.append(SignalQuery(target="F",
76 | device="AABBCCDD",
77 | itf=CanedgeInterface.CAN,
78 | chn=CanedgeChannel.CH1,
79 | db=obd,
80 | signal_name="S1_PID_0D_VehicleSpeed",
81 | interval_ms=sampling_ms,
82 | method=SampleMethod.NEAREST))
83 |
84 | time_series = get_time_series(fs, signal_queries, start_date, stop_date)
85 |
86 | #assert start_date.timestamp() <= time_series[0][1] / 1000, "Start time error"
87 | #assert stop_date.timestamp() >= time_series[-1][1] / 1000, "Stop time error"
88 | #assert time_series[1][1] - time_series[0][1] >= interval_ms, "Period error"
89 |
90 | for a in time_series:
91 | print(a)
92 |
93 | #print(len(time_series))
94 | #print(time_series[0])
95 | #print(time_series[-1])
96 |
--------------------------------------------------------------------------------
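
For ad-hoc inspection of the printed time series, the (value, epoch-milliseconds) tuples documented in query.py can be converted back to UTC datetimes. A small sketch with illustrative values:

```python
from datetime import datetime, timezone

datapoints = [(0.1, 1603895728164.1), (1.2, 1603895729164.15)]
for value, ts_ms in datapoints:
    print(datetime.fromtimestamp(ts_ms / 1000, tz=timezone.utc), value)
```
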
/test/test_resample.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | import matplotlib.pyplot as plt
3 | import pandas as pd
4 | import numpy as np
5 | import pytest
6 |
7 |
8 | class TestResample(object):
9 |
10 | @pytest.mark.parametrize("interval_ms", [10, 50, 100, 115, 550, 755, 1000])
11 | def test_resampling(self, interval_ms):
12 | df = pd.DataFrame()
13 |
14 | df["time"] = [
15 | datetime(2000, 1, 1, 0, 0, 0, 100000), # 100 ms
16 | datetime(2000, 1, 1, 0, 0, 0, 200000),
17 | datetime(2000, 1, 1, 0, 0, 0, 300000),
18 | datetime(2000, 1, 1, 0, 0, 0, 400000),
19 | datetime(2000, 1, 1, 0, 0, 0, 500000),
20 | datetime(2000, 1, 1, 0, 0, 0, 600000),
21 | datetime(2000, 1, 1, 0, 0, 0, 700000),
22 | datetime(2000, 1, 1, 0, 0, 0, 800000),
23 | datetime(2000, 1, 1, 0, 0, 0, 900000),
24 | datetime(2000, 1, 1, 0, 0, 1, 000000), # 200 ms
25 | datetime(2000, 1, 1, 0, 0, 1, 200000),
26 | datetime(2000, 1, 1, 0, 0, 1, 400000),
27 | datetime(2000, 1, 1, 0, 0, 1, 600000),
28 | datetime(2000, 1, 1, 0, 0, 1, 800000),
29 | datetime(2000, 1, 1, 0, 0, 3, 000000), # Missing 1 s, then 100 ms
30 | datetime(2000, 1, 1, 0, 0, 3, 100000),
31 | datetime(2000, 1, 1, 0, 0, 3, 200000),
32 | datetime(2000, 1, 1, 0, 0, 3, 300000),
33 | datetime(2000, 1, 1, 0, 0, 3, 400000),
34 | datetime(2000, 1, 1, 0, 0, 3, 500000),
35 | datetime(2000, 1, 1, 0, 0, 3, 600000),
36 | datetime(2000, 1, 1, 0, 0, 3, 700000),
37 | datetime(2000, 1, 1, 0, 0, 3, 800000),
38 | datetime(2000, 1, 1, 0, 0, 3, 900000)]
39 |
40 | df['time_orig'] = df['time']
41 | df["signal"] = np.sin(np.linspace(0, 2 * np.pi, len(df["time"])))
42 |
43 | df.set_index('time', inplace=True)
44 |
45 | df_resample = df.resample(rule=f"{interval_ms}ms").nearest()
46 |
47 | print(len(df))
48 | print(len(df_resample))
49 | df_resample.drop_duplicates(subset='time_orig', inplace=True)
50 | df_resample.dropna(axis=0, how='any', inplace=True)
51 | print(len(df_resample))
52 |
53 | assert len(df) >= len(df_resample)
54 |
55 | df.reset_index(inplace=True)
56 | df_resample.reset_index(inplace=True)
57 |
58 | fig = plt.figure()
59 | ax = fig.add_subplot(111)
60 | ax.scatter(df['time'], df['signal'], s=30, c='b', label='Orig')
61 | ax.scatter(df_resample['time_orig'], df_resample['signal'], s=10, c='r', label='Resample')
62 | plt.xticks(rotation=90)
63 |         plt.title(f"Interval: {interval_ms} ms")
64 | plt.show()
65 |
66 | #print(df)
67 | #print(df_resample)
--------------------------------------------------------------------------------
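
A standalone, plot-free sketch of the down-sampling trick exercised above (and used in query.py): resample with .nearest(), then use the backed-up original timestamps to drop duplicates and NaNs so only real samples survive and data is never up-sampled. The 100 ms / 550 ms values are illustrative:

```python
import numpy as np
import pandas as pd

# 20 samples at 100 ms spacing with a backed-up original-time column
idx = pd.date_range("2000-01-01", periods=20, freq="100ms")
df = pd.DataFrame({"signal": np.sin(np.linspace(0, 2 * np.pi, len(idx)))}, index=idx)
df["time_orig"] = df.index

# Resample to 550 ms, then keep only real samples
df_resample = df.resample("550ms").nearest()
df_resample = df_resample.drop_duplicates(subset="time_orig").dropna(how="any")

assert len(df_resample) <= len(df)
print(df_resample)
```
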
/test/test_signals.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | import pandas as pd
3 | import numpy as np
4 | import pytest
5 | import can_decoder
6 |
7 | class TestSignals(object):
8 |
9 | def get_db_signals(self, db) -> list:
10 | def get_signal_recursive(signals) -> list:
11 | signal_list = []
12 | for item in signals:
13 | if isinstance(signals, dict):
14 | signal_list.extend(get_signal_recursive(signals[item]))
15 | if isinstance(signals, list):
16 | signal_list.append(item.name)
17 | signal_list.extend(get_signal_recursive(item.signals))
18 | return signal_list
19 | signals = []
20 | for key, value in db.frames.items():
21 | signals.extend(get_signal_recursive(value.signals))
22 | return signals
23 |
24 |
25 | def test_signals(self):
26 |
27 | db = can_decoder.load_dbc("root/obd.dbc")
28 |
29 | a = self.get_db_signals(db)
30 |
31 |
32 | print(a)
--------------------------------------------------------------------------------
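
A standalone sketch of the recursive walk in get_db_signals above, using hypothetical stand-ins instead of a real can_decoder database: dicts represent multiplexed groups (multiplexer value mapped to a list of signals) and lists hold signal objects with name and signals attributes.

```python
from collections import namedtuple

Sig = namedtuple("Sig", ["name", "signals"])  # hypothetical stand-in for a DB signal

def walk(signals) -> list:
    names = []
    for item in signals:
        if isinstance(signals, dict):
            # Multiplexed group: recurse into the signals for this multiplexer value
            names.extend(walk(signals[item]))
        if isinstance(signals, list):
            # Plain signal: record its name and recurse into any sub-signals
            names.append(item.name)
            names.extend(walk(item.signals))
    return names

frame_signals = [Sig("S1_PID_0C_EngineRPM", []),
                 Sig("S1_Mode", {"0x01": [Sig("S1_PID_0D_VehicleSpeed", [])]})]
print(walk(frame_signals))  # ['S1_PID_0C_EngineRPM', 'S1_Mode', 'S1_PID_0D_VehicleSpeed']
```
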
/test/tester_browser.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timezone, timedelta
2 |
3 | import canedge_browser
4 | from mdf_iter import mdf_iter
5 |
6 | if __name__ == '__main__':
7 |
8 | fs = canedge_browser.LocalFileSystem(base_path="root")
9 |
10 | start_date = datetime(2021, 3, 26, 6, 0, 0, tzinfo=timezone.utc)
11 | stop_date = datetime(2021, 3, 26, 18, 0, 0, tzinfo=timezone.utc)
12 |
13 | for offset in range(0, 2):
14 |
15 | print(f"Offset: {offset}")
16 |
17 | stop_date_offset = stop_date + timedelta(minutes=offset)
18 |
19 |         log_files = canedge_browser.get_log_files(fs, "79A2DD1A", start_date=start_date, stop_date=stop_date_offset)
20 | log_files.sort()
21 |
22 | for log_file in log_files:
23 | with fs.open(log_file, "rb") as handle:
24 | mdf_file = mdf_iter.MdfFile(handle)
25 | df_raw = mdf_file.get_data_frame()
26 | start_time = df_raw.head(1).index.values[0]
27 | print(f"{log_file}, {start_time}")
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | def setup_fs(s3, key="", secret="", endpoint="", region="", cert="", passwords={}):
2 |     """Given a boolean specifying whether to use local disk or S3, set up the filesystem
3 |     Syntax examples: AWS (http://s3.us-east-2.amazonaws.com), MinIO (http://192.168.0.1:9000)
4 |     The cert input is relevant if you're using MinIO with TLS enabled, for specifying the path to the certificate.
5 |     For MinIO you should also pass the region_name
6 | 
7 |     The block_size is set to accommodate files up to 55 MB in size. If your log files are larger, adjust this value accordingly
9 |
10 | if s3:
11 | import s3fs
12 |
13 | block_size = 55 * 1024 * 1024
14 |
15 | if "amazonaws" in endpoint:
16 | fs = s3fs.S3FileSystem(key=key, secret=secret, default_block_size=block_size)
17 | elif cert != "":
18 | fs = s3fs.S3FileSystem(
19 | key=key,
20 | secret=secret,
21 | client_kwargs={"endpoint_url": endpoint, "verify": cert, "region_name": region},
22 | default_block_size=block_size,
23 | )
24 | else:
25 | fs = s3fs.S3FileSystem(
26 | key=key,
27 | secret=secret,
28 | client_kwargs={"endpoint_url": endpoint, "region_name": region},
29 | default_block_size=block_size,
30 | )
31 |
32 | else:
33 | from pathlib import Path
34 | import canedge_browser
35 |
36 | base_path = Path(__file__).parent
37 | fs = canedge_browser.LocalFileSystem(base_path=base_path, passwords=passwords)
38 |
39 | return fs
40 |
41 |
42 | # -----------------------------------------------
43 | def load_dbc_files(dbc_paths):
44 | """Given a list of DBC file paths, create a list of conversion rule databases"""
45 | import can_decoder
46 | from pathlib import Path
47 |
48 | db_list = []
49 | for dbc in dbc_paths:
50 | db = can_decoder.load_dbc(Path(__file__).parent / dbc)
51 | db_list.append(db)
52 |
53 | return db_list
54 |
55 |
56 | # -----------------------------------------------
57 | def list_log_files(fs, devices, start_times, verbose=True, passwords={}):
58 |     """Given a list of device paths, list log files from the specified filesystem.
59 | Data is loaded based on the list of start datetimes
60 | """
61 | import canedge_browser
62 |
63 | log_files = []
64 |
65 | if len(start_times):
66 | for idx, device in enumerate(devices):
67 | start = start_times[idx]
68 | log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)
69 | log_files.extend(log_files_device)
70 |
71 | if verbose:
72 | print(f"Found {len(log_files)} log files\n")
73 |
74 | return log_files
75 |
76 | def add_signal_prefix(df_phys, can_id_prefix=False, pgn_prefix=False, bus_prefix=False):
77 |     """Rename Signal names by prefixing the bus channel,
78 |     the full CAN ID (in hex) and/or the J1939 PGN
79 |     """
80 | from J1939_PGN import J1939_PGN
81 |
82 | if df_phys.empty:
83 | return df_phys
84 | else:
85 | prefix = ""
86 | if bus_prefix:
87 | prefix += df_phys["BusChannel"].apply(lambda x: f"{x}.")
88 | if can_id_prefix:
89 | prefix += df_phys["CAN ID"].apply(lambda x: f"{hex(int(x))[2:].upper()}." )
90 | if pgn_prefix:
91 | prefix += df_phys["CAN ID"].apply(lambda x: f"{J1939_PGN(int(x)).pgn}.")
92 |
93 | df_phys["Signal"] = prefix + df_phys["Signal"]
94 |
95 | return df_phys
96 |
97 | def restructure_data(df_phys, res, ffill=False):
98 | """Restructure the decoded data to a resampled
99 | format where each column reflects a Signal
100 | """
101 | import pandas as pd
102 |
103 | if not df_phys.empty and res != "":
104 | df_phys = df_phys.pivot_table(values="Physical Value", index=pd.Grouper(freq=res), columns="Signal")
105 |
106 | if ffill:
107 | df_phys = df_phys.ffill()
108 |
109 | return df_phys
110 |
111 |
112 | def test_signal_threshold(df_phys, signal, threshold):
113 | """Illustrative example for how to extract a signal and evaluate statistical values
114 | vs. defined thresholds. The function can be easily modified for your needs.
115 | """
116 | df_signal = df_phys[df_phys["Signal"] == signal]["Physical Value"]
117 |
118 | stats = df_signal.agg(["count", "min", "max", "mean", "std"])
119 | delta = stats["max"] - stats["min"]
120 |
121 | if delta > threshold:
122 | print(f"{signal} exhibits a 'max - min' delta of {delta} exceeding threshold of {threshold}")
123 |
124 |
125 | def add_custom_sig(df_phys, signal1, signal2, function, new_signal):
126 | """Helper function for calculating a new signal based on two signals and a function.
127 | Returns a dataframe with the new signal name and physical values
128 | """
129 | import pandas as pd
130 |
131 | try:
132 | s1 = df_phys[df_phys["Signal"] == signal1]["Physical Value"].rename(signal1)
133 | s2 = df_phys[df_phys["Signal"] == signal2]["Physical Value"].rename(signal2)
134 |
135 | df_new_sig = pd.merge_ordered(
136 | s1,
137 | s2,
138 | on="TimeStamp",
139 | fill_method="ffill",
140 | ).set_index("TimeStamp")
141 | df_new_sig = df_new_sig.apply(lambda x: function(x[0], x[1]), axis=1).dropna().rename("Physical Value").to_frame()
142 | df_new_sig["Signal"] = new_signal
143 | df_phys = df_phys.append(df_new_sig)
144 |
145 |     except Exception:
146 | print(f"Warning: Custom signal {new_signal} not created\n")
147 |
148 | return df_phys
149 |
150 |
151 | # -----------------------------------------------
152 | class ProcessData:
153 | def __init__(self, fs, db_list, signals=[], days_offset=None, verbose=True):
154 | from datetime import datetime, timedelta
155 |
156 | self.db_list = db_list
157 | self.signals = signals
158 | self.fs = fs
159 | self.days_offset = days_offset
160 | self.verbose = verbose
161 |
162 |         if self.verbose and self.days_offset is not None:
163 | date_offset = (datetime.today() - timedelta(days=self.days_offset)).strftime("%Y-%m-%d")
164 | print(
165 | f"Warning: days_offset = {self.days_offset}, meaning data is offset to start at {date_offset}.\nThis is intended for sample data testing only. Set days_offset = None when processing your own data."
166 | )
167 |
168 | return
169 |
170 | def extract_phys(self, df_raw):
171 |         """Given df of raw data and list of decoding databases, create a new df with
172 | physical values (no duplicate signals and optionally filtered/rebaselined)
173 | """
174 | import can_decoder
175 | import pandas as pd
176 |
177 | df_phys = pd.DataFrame()
178 | df_phys_temp = []
179 | for db in self.db_list:
180 | df_decoder = can_decoder.DataFrameDecoder(db)
181 |
182 | for bus, bus_group in df_raw.groupby("BusChannel"):
183 | for length, group in bus_group.groupby("DataLength"):
184 | df_phys_group = df_decoder.decode_frame(group)
185 | if not df_phys_group.empty:
186 | df_phys_group["BusChannel"] = bus
187 | df_phys_temp.append(df_phys_group)
188 |
189 | df_phys = pd.concat(df_phys_temp, ignore_index=False).sort_index()
190 |
191 | # remove duplicates in case multiple DBC files contain identical signals
192 | df_phys["datetime"] = df_phys.index
193 | df_phys = df_phys.drop_duplicates(keep="first")
194 | df_phys = df_phys.drop(labels="datetime", axis=1)
195 |
196 | # optionally filter and rebaseline the data
197 | df_phys = self.filter_signals(df_phys)
198 |
199 |         if not df_phys.empty and isinstance(self.days_offset, int):
200 | df_phys = self.rebaseline_data(df_phys)
201 |
202 | return df_phys
203 |
204 | def rebaseline_data(self, df_phys):
205 | """Given a df of physical values, this offsets the timestamp
206 | to be equal to today, minus a given number of days.
207 | """
208 | from datetime import datetime, timezone
209 | import pandas as pd
210 |
211 | delta_days = (datetime.now(timezone.utc) - df_phys.index.min()).days - self.days_offset
212 | df_phys.index = df_phys.index + pd.Timedelta(delta_days, "day")
213 |
214 | return df_phys
215 |
216 | def filter_signals(self, df_phys):
217 | """Given a df of physical values, return only signals matched by filter"""
218 | if not df_phys.empty and len(self.signals):
219 | df_phys = df_phys[df_phys["Signal"].isin(self.signals)]
220 |
221 | return df_phys
222 |
223 |     def get_raw_data(self, log_file, passwords={}, lin=False):
224 | """Extract a df of raw data and device ID from log file.
225 | Optionally include LIN bus data by setting lin=True
226 | """
227 | import mdf_iter
228 |
229 | with self.fs.open(log_file, "rb") as handle:
230 | mdf_file = mdf_iter.MdfFile(handle, passwords=passwords)
231 | device_id = self.get_device_id(mdf_file)
232 |
233 |             if lin:
234 |                 import pandas as pd
235 |                 df_raw_lin = mdf_file.get_data_frame_lin()
236 |                 df_raw_lin["IDE"] = 0
237 |                 df_raw = pd.concat([mdf_file.get_data_frame(), df_raw_lin])
238 |             else:
239 |                 df_raw = mdf_file.get_data_frame()
240 |
241 | return df_raw, device_id
242 |
243 | def get_device_id(self, mdf_file):
244 | return mdf_file.get_metadata()["HDcomment.Device Information.serial number"]["value_raw"]
245 |
246 | def print_log_summary(self, device_id, log_file, df_phys):
247 | """Print summary information for each log file"""
248 | if self.verbose:
249 | print(
250 | "\n---------------",
251 | f"\nDevice: {device_id} | Log file: {log_file.split(device_id)[-1]} [Extracted {len(df_phys)} decoded frames]\nPeriod: {df_phys.index.min()} - {df_phys.index.max()}\n",
252 | )
253 |
254 |
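# Illustrative usage sketch of ProcessData (fs, db_list and log_files are
# assumed to come from the file system, DBC and log file helpers defined
# earlier in this module):
#
#   proc = ProcessData(fs, db_list, signals=[])
#   for log_file in log_files:
#       df_raw, device_id = proc.get_raw_data(log_file)
#       df_phys = proc.extract_phys(df_raw)
#       proc.print_log_summary(device_id, log_file, df_phys)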
255 | # -----------------------------------------------
256 | class MultiFrameDecoder:
257 |
258 | """Class for handling transport protocol data. For each response ID, identify
259 | sequences of subsequent frames and combine the relevant parts of the data payloads
260 | into a single payload with the relevant CAN ID. The original raw dataframe is
261 | then cleansed of the original response ID sequence frames. Instead, the new reassembled
262 | frames are inserted.
263 |
264 | :param tp_type: the class supports UDS ("uds"), NMEA 2000 Fast Packets ("nmea") and J1939 ("j1939")
265 | :param df_raw: dataframe of raw CAN data from the mdf_iter module
266 |
267 | SINGLE_FRAME_MASK: mask used in matching single frames
268 | FIRST_FRAME_MASK: mask used in matching first frames
269 |     CONSEQ_FRAME_MASK: mask used in matching consecutive frames
270 | SINGLE_FRAME: frame type reflecting a single frame response
271 | FIRST_FRAME: frame type reflecting the first frame in a multi frame response
272 |     CONSEQ_FRAME: frame type reflecting a consecutive frame in a multi frame response
273 | ff_payload_start: the combined payload will start at this byte in the FIRST_FRAME
274 |     bam_pgn: used for J1939 only; the PGN of the initial BAM (TP.CM) message, in decimal
275 | res_id_list: TP 'response CAN IDs' to process
276 |
277 | """
278 | FRAME_STRUCT = {
279 | "": {},
280 | "uds": {
281 | "SINGLE_FRAME_MASK": 0xF0,
282 | "FIRST_FRAME_MASK": 0xF0,
283 | "CONSEQ_FRAME_MASK": 0xF0,
284 | "SINGLE_FRAME": 0x00,
285 | "FIRST_FRAME": 0x10,
286 | "CONSEQ_FRAME": 0x20,
287 | "ff_payload_start": 1,
288 | "bam_pgn": -1,
289 |             "res_id_list": [1960, 2016, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 1979, 1992, 1998, 2001, 402522235],
290 | "group": "ID"
291 | },
292 | "j1939": {
293 | "SINGLE_FRAME_MASK": 0xFF,
294 | "FIRST_FRAME_MASK": 0xFF,
295 | "CONSEQ_FRAME_MASK": 0x00,
296 | "SINGLE_FRAME": 0xFF,
297 | "FIRST_FRAME": 0x20,
298 | "CONSEQ_FRAME": 0x00,
299 | "ff_payload_start": 8,
300 | "bam_pgn": 60416,
301 | "res_id_list": [60416, 60160],
302 | "group": "SA"
303 | },
304 | "nmea": {
305 | "SINGLE_FRAME_MASK": 0xFF,
306 | "FIRST_FRAME_MASK": 0x1F,
307 | "CONSEQ_FRAME_MASK": 0x00,
308 | "SINGLE_FRAME": 0xFF,
309 | "FIRST_FRAME": 0x00,
310 | "CONSEQ_FRAME": 0x00,
311 | "ff_payload_start": 2,
312 | "bam_pgn": -1,
313 | "res_id_list":[126983, 126984, 126985, 126986, 126987, 126988, 126996, 127233, 127237, 127489, 127496, 127497, 127503, 127504, 127506, 127751, 128275, 128520, 128538, 129029, 129038, 129039, 129040, 129041, 129044, 129284, 129285, 129301, 129302, 129538, 129540, 129541, 129542, 129545, 129547, 129549, 129551, 129556, 129792, 129793, 129794, 129795, 129796, 129798, 129799, 129800, 129801, 129803, 129804, 129805, 129806, 129807, 129808, 129809, 129810, 129811, 129812, 129813, 129814, 129815, 129816, 130052, 130053, 130054, 130060, 130061, 130064, 130065, 130067, 130068, 130069, 130070, 130071, 130072, 130073, 130074, 130320, 130321, 130322, 130323, 130324, 130564, 130565, 130567, 130569, 130571, 130575, 130577, 130578, 130581, 130584, 130586],
314 | "group": "ID"
315 | }}
316 |
317 | def __init__(self, tp_type=""):
318 | self.tp_type = tp_type
319 | return
320 |
321 | def calculate_pgn(self, frame_id):
322 | pgn = (frame_id & 0x03FFFF00) >> 8
323 | pgn_f = pgn & 0xFF00
324 | if pgn_f < 0xF000:
325 | pgn &= 0xFFFFFF00
326 | return pgn
327 |
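    # Worked example for calculate_pgn: for the 29-bit CAN ID 0x0CF00400,
    # (ID & 0x03FFFF00) >> 8 gives 0xF004; since 0xF000 is not below 0xF000
    # (PDU2 format) the PGN stays 0xF004 = 61444 (EEC1). For a PDU1 ID such as
    # 0x18EBFF00, the masked value 0xEBFF is truncated to 0xEB00 = 60160 (TP.DT).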
328 | def calculate_sa(self, frame_id):
329 | sa = frame_id & 0x000000FF
330 | return sa
331 |
332 | def construct_new_tp_frame(self, base_frame, payload_concatenated, can_id):
333 | new_frame = base_frame.copy()
334 | new_frame["DataBytes"] = payload_concatenated
335 | new_frame["DLC"] = 0
336 | new_frame["DataLength"] = len(payload_concatenated)
337 | if can_id:
338 | new_frame["ID"] = can_id
339 | return new_frame
340 |
341 |     def identify_matching_ids(self, df_raw, res_id_list_full, bam_pgn):
342 | # identify which CAN IDs (or PGNs) match the TP IDs and create a filtered df_raw_match
343 | # which is used to separate the df_raw into two parts: Incl/excl TP frames.
344 | # Also produces a reduced res_id_list that only contains relevant ID entries
345 | if self.tp_type == "nmea":
346 | df_raw_pgns = df_raw["ID"].apply(self.calculate_pgn)
347 | df_raw_match = df_raw_pgns.isin(res_id_list_full)
348 | res_id_list = df_raw_pgns[df_raw_match].drop_duplicates().values.tolist()
349 |         elif self.tp_type == "j1939":
350 | df_raw_pgns = df_raw["ID"].apply(self.calculate_pgn)
351 | df_raw_match = df_raw_pgns.isin(res_id_list_full)
352 | res_id_list = res_id_list_full.copy()
353 | res_id_list.remove(bam_pgn)
354 | if type(res_id_list) is not list:
355 | res_id_list = [res_id_list]
356 | elif self.tp_type == "uds":
357 | df_raw_pgns = None
358 | df_raw_match = df_raw["ID"].isin(res_id_list_full)
359 | res_id_list = df_raw["ID"][df_raw_match].drop_duplicates().values.tolist()
360 |
361 | df_raw_tp = df_raw[df_raw_match]
362 | df_raw_excl_tp = df_raw[~df_raw_match]
363 |
364 | if len(df_raw) - len(df_raw_tp) - len(df_raw_excl_tp):
365 | print("Warning - total rows does not equal sum of rows incl/excl transport protocol frames")
366 |
367 | return df_raw_tp, df_raw_excl_tp, res_id_list, df_raw_pgns
368 |
369 |     def filter_df_raw_tp(self, df_raw_tp, df_raw_tp_pgns, res_id):
370 | # filter df_raw_tp to include only frames for the specific response ID res_id
371 | if self.tp_type == "nmea":
372 | df_raw_tp_res_id = df_raw_tp[df_raw_tp_pgns.isin([res_id])]
373 | elif self.tp_type == "j1939":
374 | df_raw_tp_res_id = df_raw_tp
375 | df_raw_tp_res_id = df_raw_tp_res_id.copy()
376 | df_raw_tp_res_id["SA"] = df_raw_tp_res_id["ID"].apply(self.calculate_sa)
377 | else:
378 | df_raw_tp_res_id = df_raw_tp[df_raw_tp["ID"].isin([res_id])]
379 | return df_raw_tp_res_id
380 |
381 |     def check_if_first_frame(self, row, bam_pgn, first_frame_mask, first_frame):
382 | # check if row reflects the first frame of a TP sequence
383 | if self.tp_type == "j1939" and bam_pgn == self.calculate_pgn(row.ID):
384 | first_frame_test = True
385 | elif (row.DataBytes[0] & first_frame_mask) == first_frame:
386 | first_frame_test = True
387 | else:
388 | first_frame_test = False
389 |
390 | return first_frame_test
391 |
392 | def pgn_to_can_id(self,row):
393 | # for J1939, extract PGN and convert to 29 bit CAN ID for use in baseframe
394 | pgn_hex = "".join("{:02x}".format(x) for x in reversed(row.DataBytes[5:8]))
395 | pgn = int(pgn_hex, 16)
396 | can_id = (6 << 26) | (pgn << 8) | row.SA
397 | return can_id
398 |
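    # Worked example for pgn_to_can_id: a J1939 TP.CM/BAM frame announcing PGN
    # 65262 (0xFEEE) from source address 0x00 carries the PGN little-endian in
    # DataBytes[5:8], so (6 << 26) | (0xFEEE << 8) | 0x00 = CAN ID 0x18FEEE00.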
399 | def get_payload_length(self,row):
400 | if self.tp_type == "uds":
401 | ff_length = (row.DataBytes[0] & 0x0F) << 8 | row.DataBytes[1]
402 | if self.tp_type == "nmea":
403 | ff_length = row.DataBytes[1]
404 | if self.tp_type == "j1939":
405 |             ff_length = int("".join("{:02x}".format(x) for x in reversed(row.DataBytes[1:3])), 16)
406 | return ff_length
407 |
408 | def combine_tp_frames(self, df_raw):
409 | # main function that reassembles TP frames in df_raw
410 | import pandas as pd
411 |
412 | # if tp_type = "" return original df_raw
413 | if self.tp_type not in ["uds","nmea", "j1939"]:
414 | return df_raw
415 |
416 | # extract protocol specific TP frame info
417 | frame_struct = MultiFrameDecoder.FRAME_STRUCT[self.tp_type]
418 | res_id_list_full = frame_struct["res_id_list"]
419 | bam_pgn = frame_struct["bam_pgn"]
420 | ff_payload_start = frame_struct["ff_payload_start"]
421 | first_frame_mask = frame_struct["FIRST_FRAME_MASK"]
422 | first_frame = frame_struct["FIRST_FRAME"]
423 | single_frame_mask = frame_struct["SINGLE_FRAME_MASK"]
424 | single_frame = frame_struct["SINGLE_FRAME"]
425 | conseq_frame_mask = frame_struct["CONSEQ_FRAME_MASK"]
426 | conseq_frame = frame_struct["CONSEQ_FRAME"]
427 |
428 | # split df_raw in two (incl/excl TP frames)
429 | df_raw_tp, df_raw_excl_tp, res_id_list, df_raw_pgns = self.identify_matching_ids(df_raw,res_id_list_full, bam_pgn)
430 |
431 | # initiate new df_raw that will contain both the df_raw excl. TP frames and subsequently all combined TP frames
432 | df_raw = [df_raw_excl_tp]
433 |
434 | # for NMEA, apply PGN decoding outside loop
435 | if self.tp_type == "nmea":
436 | df_raw_tp_pgns = df_raw_tp["ID"].apply(self.calculate_pgn)
437 | else:
438 | df_raw_tp_pgns = None
439 |
440 | # loop through each relevant TP response ID
441 | for res_id in res_id_list:
442 |
443 | # get subset of df_raw_tp containing res_id
444 | df_raw_tp_res_id = self.filter_df_raw_tp(df_raw_tp,df_raw_tp_pgns, res_id)
445 |
446 | # distinguish channels
447 | for channel, df_channel in df_raw_tp_res_id.groupby("BusChannel"):
448 |
449 | # distinguish IDs from PGNs by grouping on ID (or SA for J1939)
450 | for identifier, df_raw_filter in df_channel.groupby(frame_struct["group"]):
451 | base_frame = df_raw_filter.iloc[0]
452 | frame_list = []
453 | frame_timestamp_list = []
454 | payload_concatenated = []
455 |
456 | ff_length = 0xFFF
457 | first_first_frame_test = True
458 | can_id = None
459 | conseq_frame_prev = None
460 |
461 | # iterate through rows in filtered dataframe
462 | for row in df_raw_filter.itertuples(index=True,name='Pandas'):
463 | index = row.Index
464 | first_frame_test = self.check_if_first_frame(row, bam_pgn, first_frame_mask,first_frame)
465 | first_byte = row.DataBytes[0]
466 |
467 | # if single frame, save frame directly (excl. 1st byte)
468 | if self.tp_type != "nmea" and (first_byte & single_frame_mask == single_frame):
469 | new_frame = self.construct_new_tp_frame(base_frame, row.DataBytes, row.ID)
470 | frame_list.append(new_frame.values.tolist())
471 | frame_timestamp_list.append(index)
472 |
473 | # if first frame, save info from prior multi frame response sequence,
474 | # then initialize a new sequence incl. the first frame payload
475 | elif first_frame_test:
476 | # create a new frame using information from previous iterations
477 | if len(payload_concatenated) >= ff_length:
478 | new_frame = self.construct_new_tp_frame(base_frame, payload_concatenated, can_id)
479 | frame_list.append(new_frame.values.tolist())
480 | frame_timestamp_list.append(frame_timestamp)
481 |
482 | # reset and start next frame with timestamp & CAN ID from this first frame plus initial payload
483 | conseq_frame_prev = None
484 | frame_timestamp = index
485 |
486 | if self.tp_type == "j1939":
487 | can_id = self.pgn_to_can_id(row)
488 |
489 | ff_length = self.get_payload_length(row)
490 | payload_concatenated = row.DataBytes[ff_payload_start:]
491 |
492 |                     # if consecutive frame, extend payload with payload excl. 1st byte
493 | elif (conseq_frame_prev == None) or ((first_byte - conseq_frame_prev) == 1):
494 | conseq_frame_prev = first_byte
495 | payload_concatenated += row.DataBytes[1:]
496 |
497 |
498 | df_raw_res_id_new = pd.DataFrame(frame_list, columns=base_frame.index, index=frame_timestamp_list)
499 | df_raw.append(df_raw_res_id_new)
500 |
501 | df_raw = pd.concat(df_raw,join='outer')
502 | df_raw.index.name = "TimeStamp"
503 | df_raw = df_raw.sort_index()
504 | return df_raw
505 |
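# Illustrative usage sketch of MultiFrameDecoder (df_raw is a raw dataframe as
# returned by ProcessData.get_raw_data; tp_type must be "uds", "j1939" or "nmea"):
#
#   tp = MultiFrameDecoder("j1939")
#   df_raw_combined = tp.combine_tp_frames(df_raw)
#   df_phys = proc.extract_phys(df_raw_combined)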
--------------------------------------------------------------------------------