├── .gitignore ├── CHANGES.md ├── J1939db.json ├── LICENSE ├── README.md ├── create_j1939db-json.py ├── pretty_j1939.py ├── pretty_j1939 ├── __init__.py └── describe.py ├── setup.py └── testme.sh /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Distribution / packaging 7 | .Python 8 | build/ 9 | develop-eggs/ 10 | dist/ 11 | downloads/ 12 | eggs/ 13 | .eggs/ 14 | lib/ 15 | lib64/ 16 | parts/ 17 | sdist/ 18 | var/ 19 | wheels/ 20 | pip-wheel-metadata/ 21 | share/python-wheels/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | MANIFEST 26 | 27 | # IDEs 28 | .idea/ 29 | 30 | # Excel spreadsheets 31 | *.xls 32 | 33 | # ignore any testing resources in tmp/ so no one accidentally commits DAs or logs 34 | tmp/ 35 | -------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | # 0.0.3 WIP 2 | 3 | * tested on J1939DA_201311.xls, J1939DA_201611.xls, J1939DA_201910.xls, and J1939DA_DEC2020.xls 4 | * fixed incorrectly dropping almost all multi byte SPNs thank you @s5y3XZpGvQPApqR 5 | * can read from stdin with '-' argument now 6 | * expands source addresses in DAs 7 | 8 | # 0.0.2 9 | 10 | * support for non-contiguous SPNs (thanks @j4l) 11 | * can describe SPNs in transport layer in real-time (as their bytes are received) (thanks @j4l) 12 | * correctly reassembles RTS-CTS transport sessions as well (thanks @j4l) 13 | * can specify J1939 JSON db on command line (thanks @j4l) 14 | * default to describing transport PGNs as first-class PGN 15 | * default to omitting description of incomplete frames 16 | -------------------------------------------------------------------------------- /J1939db.json: -------------------------------------------------------------------------------- 1 | { 2 | "COMMENT": "These are not the DBs you are looking for. See HOWTO in README.md", 3 | "J1939PGNdb": { 4 | }, 5 | "J1939SPNdb": { 6 | }, 7 | "J1939BitDecodings": { 8 | }, 9 | "J1939SATabledb": { 10 | } 11 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `pretty_j1939` 2 | 3 | python3 libs and scripts for pretty-printing J1939 candump logs. 4 | 5 | This package can: 6 | 1. pretty-print J1939 traffic captured in candump logs AND 7 | 1. convert a J1939 Digital Annex (Excel) file into a JSON structure for use in the above 8 | 9 | ## Some examples of pretty printing 10 | 11 | *Formatted* content (one per line) next to candump data: 12 | 13 | ```bash 14 | $ pretty_j1939.py --candata --format example.candump.txt | head 15 | (1543509533.000838) can0 10FDA300#FFFF07FFFFFFFFFF ; { 16 | ; "DA": "All(255)", 17 | ; "PGN": "EEC6(64931)", 18 | ; "SA": "Engine #1( 0)", 19 | ; "Engine Variable Geometry Turbocharger Actuator #1": "2.8000000000000003 [%]" 20 | ; } 21 | (1543509533.000915) can0 18FEE000#FFFFFFFFB05C6800 ; { 22 | ; "DA": "All(255)", 23 | ; "PGN": "VD(65248)", 24 | ; "SA": "Engine #1( 0)", 25 | ``` 26 | 27 | Single-line contents next to candump data: 28 | 29 | ```bash 30 | $ pretty_j1939.py --candata example.candump.txt | head 31 | (1543509533.000838) can0 10FDA300#FFFF07FFFFFFFFFF ; {"SA":"Engine #1( 0)","DA":"All(255)","PGN":"EEC6(64931)","Engine Variable Geometry Turbocharger Actuator #1":"2.8000000000000003 [%]"} 32 | (1543509533.000915) can0 18FEE000#FFFFFFFFB05C6800 ; {"SA":"Engine #1( 0)","DA":"All(255)","PGN":"VD(65248)","Total Vehicle Distance":"854934.0 [m]"} 33 | (1543509533.000991) can0 08FE6E0B#0000000000000000 ; {"SA":"Brakes - System Controller( 11)","DA":"All(255)","PGN":"HRW(65134)","Front Axle, Left Wheel Speed":"0.0 [kph]","Front axle, right wheel speed":"0.0 [kph]","Rear axle, left wheel speed":"0.0 [kph]","Rear axle, right wheel speed":"0.0 [kph]"} 34 | (1543509533.001070) can0 18FDB255#FFFFFFFF0100FFFF ; {"SA":"Diesel Particulate Filter Controller( 85)","DA":"All(255)","PGN":"AT1IMG(64946)","Aftertreatment 1 Diesel Particulate Filter Differential Pressure":"0.1 [kPa]"} 35 | (1543509533.001145) can0 0CF00400#207D87481400F087 ; {"SA":"Engine #1( 0)","DA":"All(255)","PGN":"EEC1(61444)","Engine Torque Mode":"2 (Unknown)","Actual Engine - Percent Torque (Fractional)":"0.0 [%]","Driver's Demand Engine - Percent Torque":"0 [%]","Actual Engine - Percent Torque":"10 [%]","Engine Speed":"649.0 [rpm]","Source Address of Controlling Device for Engine Control":"0 [SA]","Engine Demand - Percent Torque":"10 [%]"} 36 | (1543509533.001220) can0 18FF4500#6D00FA00FF00006A ; {"SA":"Engine #1( 0)","DA":"All(255)","PGN":"PropB_45(65349)","Manufacturer Defined Usage (PropB_PDU2)":"0x6d00fa00ff00006a"} 37 | (1543509533.001297) can0 18FEDF00#82FFFFFF7DE70300 ; {"SA":"Engine #1( 0)","DA":"All(255)","PGN":"EEC3(65247)","Nominal Friction - Percent Torque":"5 [%]","Estimated Engine Parasitic Losses - Percent Torque":"0 [%]","Aftertreatment 1 Exhaust Gas Mass Flow Rate":"199.8 [kg/h]","Aftertreatment 1 Intake Dew Point":"0 (00 - Not exceeded the dew point)","Aftertreatment 1 Exhaust Dew Point":"0 (00 - Not exceeded the dew point)","Aftertreatment 2 Intake Dew Point":"0 (00 - Not exceeded the dew point)","Aftertreatment 2 Exhaust Dew Point":"0 (00 - Not exceeded the dew point)"} 38 | (1543509533.001372) can0 1CFE9200#FFFFFFFFFFFFFFFF ; {"SA":"Engine #1( 0)","DA":"All(255)","PGN":"EI1(65170)"} 39 | (1543509533.001447) can0 18F00131#FFFFFF3F00FFFFFF ; {"SA":"Cab Controller - Primary( 49)","DA":"All(255)","PGN":"EBC1(61441)","Accelerator Interlock Switch":"0 (00 - Off)","Engine Retarder Selection":"0.0 [%]"} 40 | (1543509533.001528) can0 18FEF131#F7FFFF07CCFFFFFF ; {"SA":"Cab Controller - Primary( 49)","DA":"All(255)","PGN":"CCVS1(65265)","Cruise Control Pause Switch":"1 (01 - On)","Cruise Control Active":"0 (00 - Cruise control switched off)","Cruise Control Enable Switch":"0 (00 - Cruise control disabled)","Brake Switch":"1 (01 - Brake pedal depressed)","Cruise Control Coast (Decelerate) Switch":"0 (00 - Cruise control activator not in the position \"coast\")","Cruise Control Accelerate Switch":"0 (00 - Cruise control activator not in the position \"accelerate\")"} 41 | ``` 42 | 43 | *Formatted* contents of complete frames only. 44 | 45 | ```bash 46 | $ pretty_j1939.py --format --no-link example.candump.txt | head 47 | { 48 | "PGN": "AT1HI1(64920)", 49 | "Aftertreatment 1 Total Fuel Used": "227.5 [liters]", 50 | "Aftertreatment 1 DPF Average Time Between Active Regenerations": "173933 [Seconds]", 51 | "Aftertreatment 1 DPF Average Distance Between Active Regenerations": "1460.5 [m]" 52 | } 53 | { 54 | "PGN": "AT1HI1(64920)", 55 | "Aftertreatment 1 Total Fuel Used": "227.5 [liters]", 56 | "Aftertreatment 1 DPF Average Time Between Active Regenerations": "173933 [Seconds]", 57 | ``` 58 | 59 | The JSON output can be used as an input to [`jq`](https://stedolan.github.io/jq/manual/) to filter or format the decoded data. E.g. we can show only messages 60 | from the "Brakes": 61 | 62 | ```sh 63 | $ pretty_j1939.py example.candump.txt --format | jq ". | select(.SA | contains(\"Brakes\"))" 64 | { 65 | "PGN": "TSC1(0)", 66 | "DA": "Retarder - Engine( 15)", 67 | "SA": "Brakes - System Controller( 11)", 68 | "Engine Requested Speed/Speed Limit": "8031.875 [rpm]", 69 | "Engine Requested Torque/Torque Limit": "-125 [%]" 70 | } 71 | { 72 | "PGN": "TSC1(0)", 73 | "DA": "Retarder - Driveline( 16)", 74 | "SA": "Brakes - System Controller( 11)", 75 | "Engine Requested Speed/Speed Limit": "8031.875 [rpm]", 76 | "Engine Requested Torque/Torque Limit": "-125 [%]" 77 | } 78 | { 79 | "PGN": "TSC1(0)", 80 | "DA": "Retarder, Exhaust, Engine #1( 41)", 81 | "SA": "Brakes - System Controller( 11)", 82 | "Engine Requested Speed/Speed Limit": "8031.875 [rpm]", 83 | "Engine Requested Torque/Torque Limit": "-125 [%]" 84 | } 85 | { 86 | "PGN": "EBC1(61441)", 87 | "DA": "All(255)", 88 | "SA": "Brakes - System Controller( 11)", 89 | "ASR Brake Control Active": "0 (00 - ASR brake control passive but installed)", 90 | "Anti-Lock Braking (ABS) Active": "0 (00 - ABS passive but installed)", 91 | [...] 92 | ``` 93 | 94 | ## HOWTO 95 | 96 | First, obtain a copy of the digital annex, see https://www.sae.org/standards/content/j1939da_201907/ for details. 97 | 98 | Then, use the `create_j1939db-json.py` script to convert that Digital Annex into a JSON file e.g. 99 | 100 | ```bash 101 | create_j1939db-json.py -f tmp/J1939DA_201611.xls -w tmp/J1939DA_201611.json 102 | ``` 103 | 104 | Place the resulting JSON file at `J1939db.json` in your working directory and use the pretty-printing script e.g. 105 | 106 | ```bash 107 | pretty_j1939.py example.candump.txt 108 | ``` 109 | 110 | The `pretty_j1939.py` script (and the `describer` in `pretty_j1939/describe.py` that it builds-on) has various levels of 111 | verbosity available when describing J1939 traffic in candump logs: 112 | 113 | ```bash 114 | usage: pretty_j1939.py [-h] [--da-json [DA_JSON]] [--candata] [--no-candata] [--pgn] [--no-pgn] [--spn] [--no-spn] [--transport] [--no-transport] 115 | [--link] [--no-link] [--include-na] [--no-include-na] [--real-time] [--no-real-time] [--format] [--no-format] 116 | candump 117 | 118 | pretty-printing J1939 candump logs 119 | 120 | positional arguments: 121 | candump candump log, use - for stdin 122 | 123 | optional arguments: 124 | -h, --help show this help message and exit 125 | --da-json [DA_JSON] absolute path to the input JSON DA (default="./J1939db.json") 126 | --candata print input can data 127 | --no-candata (default) 128 | --pgn (default) print source/destination/type description 129 | --no-pgn 130 | --spn (default) print signals description 131 | --no-spn 132 | --transport print details of transport-layer streams found (default) 133 | --no-transport 134 | --link print details of link-layer frames found 135 | --no-link (default) 136 | --include-na include not-available (0xff) SPN values 137 | --no-include-na (default) 138 | --real-time emit SPNs as they are seen in transport sessions 139 | --no-real-time (default) 140 | --format format each structure (otherwise single-line) 141 | --no-format (default) 142 | ``` 143 | 144 | To use as a library one can import the pretty_j1939 modules class as `import pretty_j1939` and instantiate a `describer` 145 | with `describe = pretty_j1939.describe.get_describer()`. That `get_describer()` function has defaults that match the 146 | above command-line utility and accepts similar flags for customization. Then frames can be described by calling 147 | `describe(message_data.bytes, message_id.uint)` where `message_data` and `message_id` are both of type `bitstring.Bits` 148 | created from the hex id and data strings (lsb on left). 149 | 150 | Note that the interpretation is done per message. In case of multipacket messages, transport messages are buffered 151 | unless `real-time=True` is specified as an argument to `get_describer()` 152 | 153 | ## Installing 154 | 155 | ```bash 156 | pip3 install pretty_j1939 157 | ``` 158 | 159 | ## Testing 160 | 161 | There is a very basic testing script `testme.sh` which will attempt to `create_j1939db-json.py` each `tmp/*.xls` and 162 | then try some `pretty_j1939.py` runs with each of the resulting DA json files over all `tmp/*.log`. This is 163 | meant as a sanity test only. To test changes in `create_j1939db-json.py` the contents of the resulting DA json file must 164 | be compared to previous versions and analyzed manually; to test changes in `describe.py` or `pretty_j1939.py` the output 165 | needs to be similarly analyzed manually. 166 | 167 | There are unfortunately no `*.xls`, `*.json`, nor `*.log` distributed with this repo, you will need to bring your own. 168 | 169 | ## Notes on Digital Annex Sources 170 | 171 | You need to obtain a J1939 Digital Annex from the SAE to create a JSON file that can be used by `pretty_j1939.py` see 172 | https://www.sae.org/standards/content/j1939da_201907/ for details. 173 | 174 | There are multiple releases; here are a couple notes to consider when purchasing your copy of the Digital Annex. 175 | * the 201611 Digital Annex has fewer defined SPNs in it than the 201311 Digital Annex; at some point the owners of the 176 | DA started migrating 'technical' SPNs (e.g. DMs) to other documents and out of the DA 177 | * the 201311 Digital Annex has a couple bugs in it that the `create_j1939db-json.py` has workarounds for 178 | * the `create_j1939db-json.py` can also handle the XLS Export from isobus.net by supplying multiple excel sheets 179 | as input (with multiple `-f` arguments); however, the isobus.net definitions omit almost all of the commercial vehicle 180 | SPNs and PGNs so the resulting `J1939db.json` file may not be of great use in examining candump captures from commercial 181 | vehicles. 182 | 183 | ## Future Work 184 | 185 | * port this functionality to the [python-j1939](https://github.com/milhead2/python-j1939) and 186 | [python-can](https://github.com/hardbyte/python-can/) projects 187 | * default JSON database (of limited content) based on public information 188 | * support for J1939 aspects not encoded in the Digital Annex (ever, or anymore) e.g. Address Claim, DMs 189 | * integrate and/or move `create_j1939-db-json.py` to [canmatrix](https://canmatrix.readthedocs.io/en/latest/) 190 | * colorize the json output (and avoid breaking pipelines) -------------------------------------------------------------------------------- /create_j1939db-json.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2019 National Motor Freight Traffic Association Inc. All Rights Reserved. 3 | # See the file "LICENSE" for the full license governing this code. 4 | # 5 | 6 | from collections import OrderedDict 7 | import defusedxml 8 | from defusedxml.common import EntitiesForbidden 9 | import xlrd 10 | import sys 11 | import re 12 | import unidecode 13 | import asteval 14 | import json 15 | import argparse 16 | import functools 17 | import operator 18 | import itertools 19 | import pretty_j1939.describe 20 | 21 | ENUM_SINGLE_LINE_RE = r'[ ]*([0-9bxXA-F]+)[ ]*[-=:]?(.*)' 22 | ENUM_RANGE_LINE_RE = r'[ ]*([0-9bxXA-F]+)[ ]*(\-|to|thru)[ ]*([0-9bxXA-F]+)[ ]+[-=:]?(.*)' 23 | 24 | parser = argparse.ArgumentParser() 25 | parser.add_argument('-f', '--digital_annex_xls', type=str, required=True, action='append', 26 | default=[], nargs='+', 27 | help="the J1939 Digital Annex .xls excel file used as input") 28 | parser.add_argument('-w', '--write-json', type=str, default='-', 29 | help="where to write the output. defaults to stdout") 30 | args = parser.parse_args() 31 | 32 | 33 | class J1939daConverter: 34 | def __init__(self, digital_annex_xls_list): 35 | defusedxml.defuse_stdlib() 36 | self.j1939db = OrderedDict() 37 | self.digital_annex_xls_list = list(map(lambda da: self.secure_open_workbook(filename=da, on_demand=True), 38 | digital_annex_xls_list)) 39 | 40 | @staticmethod 41 | def secure_open_workbook(**kwargs): 42 | try: 43 | return xlrd.open_workbook(**kwargs) 44 | except EntitiesForbidden: 45 | raise ValueError('Please use an excel file without XEE') 46 | 47 | @staticmethod 48 | # returns a string of number of bits, or 'Variable', or '' 49 | def get_pgn_data_len(contents): 50 | if type(contents) is float: 51 | return str(int(contents)) 52 | elif 'bytes' not in contents.lower() and 'variable' not in contents.lower(): 53 | return str(contents) 54 | elif 'bytes' in contents.lower(): 55 | return str(int(contents.split(' ')[0]) * 8) 56 | elif 'variable' in contents.lower(): 57 | return 'Variable' 58 | elif contents.strip() == '': 59 | return '' 60 | raise ValueError('unknown PGN Length "%s"' % contents) 61 | 62 | @staticmethod 63 | # returns an int number of bits, or 'Variable' 64 | def get_spn_len(contents): 65 | if 'to' in contents.lower() or \ 66 | contents.strip() == '' or \ 67 | 'variable' in contents.lower(): 68 | return 'Variable' 69 | elif re.match(r'max [0-9]+ bytes', contents): 70 | return 'Variable' 71 | elif 'byte' in contents.lower(): 72 | return int(contents.split(' ')[0]) * 8 73 | elif 'bit' in contents.lower(): 74 | return int(contents.split(' ')[0]) 75 | elif re.match(r'^[0-9]+$', contents): 76 | return int(contents) 77 | raise ValueError('unknown SPN Length "%s"' % contents) 78 | 79 | @staticmethod 80 | # returns a single-byte delimiter or None 81 | def get_spn_delimiter(contents): 82 | if 'delimiter' in contents.lower(): 83 | if '*' in contents: 84 | return b'*' 85 | elif 'NULL' in contents: 86 | return b'\x00' 87 | else: 88 | raise ValueError('unknown SPN delimiter "%s"' % contents) 89 | else: 90 | return None 91 | 92 | @staticmethod 93 | def just_numeric_expr(contents): 94 | contents = re.sub(r'[^0-9\.\-/]', '', contents) # remove all but number and '.' 95 | contents = re.sub(r'[/-]+[ ]*$', '', contents) # remove trailing '/' or '-' that are sometimes left 96 | return contents 97 | 98 | @staticmethod 99 | def get_spn_units(contents, raw_spn_resolution): 100 | norm_contents = unidecode.unidecode(contents).lower().strip() 101 | raw_spn_resolution = unidecode.unidecode(raw_spn_resolution).lower().strip() 102 | if norm_contents == '': 103 | if 'states' in raw_spn_resolution: 104 | norm_contents = 'bit' 105 | elif 'bit-mapped' in raw_spn_resolution: 106 | norm_contents = 'bit-mapped' 107 | elif 'binary' in raw_spn_resolution: 108 | norm_contents = 'binary' 109 | elif 'ascii' in raw_spn_resolution: 110 | norm_contents = 'ascii' 111 | return norm_contents 112 | 113 | @staticmethod 114 | # returns a float in X per bit or int(0) 115 | def get_spn_resolution(contents): 116 | norm_contents = unidecode.unidecode(contents).lower() 117 | if '0 to 255 per byte' in norm_contents or \ 118 | ' states' in norm_contents or \ 119 | norm_contents == 'data specific': 120 | return 1.0 121 | elif 'bit-mapped' in norm_contents or \ 122 | 'binary' in norm_contents or \ 123 | 'ascii' in norm_contents or \ 124 | 'not defined' in norm_contents or \ 125 | 'variant determined' in norm_contents or \ 126 | '7 bit iso latin 1 characters' in norm_contents or \ 127 | contents.strip() == '': 128 | return int(0) 129 | elif 'per bit' in norm_contents or '/bit' in norm_contents: 130 | expr = J1939daConverter.just_numeric_expr(norm_contents) 131 | return J1939daConverter.asteval_eval(expr) 132 | elif 'bit' in norm_contents and '/' in norm_contents: 133 | left, right = contents.split('/') 134 | left = J1939daConverter.just_numeric_expr(left) 135 | right = J1939daConverter.just_numeric_expr(right) 136 | return J1939daConverter.asteval_eval('(%s)/(%s)' % (left, right)) 137 | elif 'microsiemens/mm' in norm_contents or \ 138 | 'usiemens/mm' in norm_contents or \ 139 | 'kw/s' in norm_contents: # special handling for this weirdness 140 | return float(contents.split(' ')[0]) 141 | raise ValueError('unknown spn resolution "%s"' % contents) 142 | 143 | @staticmethod 144 | def asteval_eval(expr): 145 | interpreter = asteval.Interpreter() 146 | ret = interpreter(expr) 147 | if len(interpreter.error)>0: 148 | raise interpreter.error[0] 149 | return ret 150 | 151 | @staticmethod 152 | # returns a float in 'units' of the SPN or int(0) 153 | def get_spn_offset(contents): 154 | norm_contents = unidecode.unidecode(contents).lower() 155 | if 'manufacturer defined' in norm_contents or 'not defined' in norm_contents or contents.strip() == '': 156 | return int(0) 157 | else: 158 | first = J1939daConverter.just_numeric_expr(contents) 159 | return J1939daConverter.asteval_eval(first) 160 | 161 | @staticmethod 162 | # returns a pair of floats (low, high) in 'units' of the SPN or (-1, -1) for undefined operational ranges 163 | def get_operational_hilo(contents, units, spn_length): 164 | norm_contents = contents.lower() 165 | if contents.strip() == '' and units.strip() == '': 166 | if type(spn_length) is int: 167 | return 0, 2**spn_length-1 168 | else: 169 | return -1, -1 170 | elif 'manufacturer defined' in norm_contents or\ 171 | 'bit-mapped' in norm_contents or\ 172 | 'not defined' in norm_contents or\ 173 | 'variant determined' in norm_contents or\ 174 | contents.strip() == '': 175 | return -1, -1 176 | elif ' to ' in norm_contents: 177 | left, right = norm_contents.split(' to ')[0:2] 178 | left = J1939daConverter.just_numeric_expr(left) 179 | right = J1939daConverter.just_numeric_expr(right) 180 | 181 | range_units = norm_contents.split(' ') 182 | range_units = range_units[len(range_units) - 1] 183 | lo = float(J1939daConverter.asteval_eval(left)) 184 | hi = float(J1939daConverter.asteval_eval(right)) 185 | if range_units == 'km' and units == 'm': 186 | return lo * 1000, hi * 1000 187 | else: 188 | return lo, hi 189 | raise ValueError('unknown operational range from "%s","%s"' % (contents, units)) 190 | 191 | @staticmethod 192 | # return a list of int of the start bits ([some_bit_pos] or [some_bit_pos,some_other_bit_pos]) of the SPN; or [ 193 | # -1] (if unknown or variable). 194 | def get_spn_start_bit(contents): 195 | norm_contents = contents.lower() 196 | 197 | if ';' in norm_contents: # special handling for e.g. '0x00;2' 198 | return [-1] 199 | 200 | # Explanation of multi-startbit (from J4L): According to 1939-71, "If the data length is larger than 1 byte 201 | # or the data spans a byte boundary, then the Start Position consists of two numerical values separated by a 202 | # comma or dash." Therefore , and - may be treated in the same way, multi-startbit. To account for 203 | # multi-startbit we will introduce the following: 1> an SPN position is now a pair of bit positions (R,S), 204 | # where S = None if not multibit 2> the SPN length is now a pair (Rs, Ss), where Ss = None if not multibit, 205 | # else net Rs = (S - R + 1) and Ss = (Length - Rs) 206 | 207 | delim = "" 208 | firsts = [norm_contents] 209 | if ',' in norm_contents: 210 | delim = "," 211 | if '-' in norm_contents: 212 | delim = "-" 213 | elif ' to ' in norm_contents: 214 | delim = " to " 215 | 216 | if len(delim) > 0: 217 | firsts = norm_contents.split(delim) 218 | 219 | if any(re.match(r'^[a-z]\+[0-9]', first) for first in firsts): 220 | return [-1] 221 | 222 | firsts = [J1939daConverter.just_numeric_expr(first) for first in firsts] 223 | if any(first.strip() == '' for first in firsts): 224 | return [-1] 225 | 226 | pos_pair = [] 227 | for first in firsts: 228 | if '.' in first: 229 | byte_index, bit_index = list(map(int, first.split('.'))) 230 | else: 231 | bit_index = 1 232 | byte_index = int(first) 233 | pos_pair.append((byte_index - 1) * 8 + (bit_index - 1)) 234 | 235 | return pos_pair 236 | 237 | @staticmethod 238 | def is_enum_line(line): 239 | if line.lower().startswith('bit state'): 240 | return True 241 | elif re.match(r'^[ ]*[0-9][0-9bxXA-F\-:]*[ ]+[^ ]+', line): 242 | return True 243 | return False 244 | 245 | @staticmethod 246 | def get_enum_lines(description_lines): 247 | enum_lines = list() 248 | 249 | def add_enum_line(test_line): 250 | test_line = re.sub(r'(Bit States|Bit State)', '', test_line, flags=re.IGNORECASE) 251 | if any(e in test_line for e in [': Tokyo', ' SPN 8846 ', ' SPN 8842 ', ' SPN 3265 ', ' SPN 3216 ', '13 preprogrammed intermediate ', '3 ASCII space characters']): 252 | return False 253 | enum_lines.append(test_line) 254 | return True 255 | 256 | any_found = False 257 | for line in description_lines: 258 | if J1939daConverter.is_enum_line(line): 259 | if any_found: 260 | add_enum_line(line) 261 | else: 262 | if J1939daConverter.match_single_enum_line(line): # special handling: first enum must use single assignment 263 | any_found = add_enum_line(line) 264 | 265 | return enum_lines 266 | 267 | @staticmethod 268 | def is_enum_lines_binary(enum_lines_only): 269 | all_ones_and_zeroes = True 270 | for line in enum_lines_only: 271 | first = J1939daConverter.match_single_enum_line(line).groups()[0] 272 | if re.sub(r'[^10b]', '', first) != first: 273 | all_ones_and_zeroes = False 274 | break 275 | 276 | return all_ones_and_zeroes 277 | 278 | @staticmethod 279 | # returns a pair of inclusive, inclusive range boundaries or None if this line is not a range 280 | def get_enum_line_range(line): 281 | match = re.match(ENUM_RANGE_LINE_RE, line) 282 | if match: 283 | groups = match.groups() 284 | if re.match(r'[01b]', groups[0]) and not re.match(r'[01b]', groups[2]): 285 | return None 286 | return groups[0], groups[2] 287 | else: 288 | return None 289 | 290 | @staticmethod 291 | def match_single_enum_line(line): 292 | line = re.sub(r'[ ]+', ' ', line) 293 | line = re.sub(r'[ ]?\-\-[ ]?', ' = ', line) 294 | return re.match(ENUM_SINGLE_LINE_RE, line) 295 | 296 | @staticmethod 297 | # returns the description part (just that part) of an enum line 298 | def get_enum_line_description(line): 299 | line = re.sub(r'[ ]+', ' ', line) 300 | line = re.sub(r'[ ]?\-\-[ ]?', ' = ', line) 301 | match = re.match(ENUM_RANGE_LINE_RE, line) 302 | if match: 303 | line = match.groups()[-1] 304 | else: 305 | match = J1939daConverter.match_single_enum_line(line) 306 | if match: 307 | line = match.groups()[-1] 308 | line = line.strip() 309 | line = line.lower() 310 | line = line.replace('sae', 'SAE').replace('iso', 'ISO') 311 | return line 312 | 313 | @staticmethod 314 | def create_bit_object_from_description(spn_description, bit_object): 315 | description_lines = spn_description.splitlines() 316 | enum_lines = J1939daConverter.get_enum_lines(description_lines) 317 | is_binary = J1939daConverter.is_enum_lines_binary(enum_lines) 318 | 319 | for line in enum_lines: 320 | enum_description = J1939daConverter.get_enum_line_description(line) 321 | 322 | range_boundaries = J1939daConverter.get_enum_line_range(line) 323 | if range_boundaries is not None: 324 | if is_binary: 325 | first = re.sub(r'b', '', range_boundaries[0]) 326 | first_val = int(first, base=2) 327 | second = re.sub(r'b', '', range_boundaries[1]) 328 | second_val = int(second, base=2) 329 | elif 'x' in range_boundaries[0].lower(): 330 | first_val = int(range_boundaries[0], base=16) 331 | second_val = int(range_boundaries[1], base=16) 332 | else: 333 | first_val = int(range_boundaries[0], base=10) 334 | second_val = int(range_boundaries[1], base=10) 335 | 336 | for i in range(first_val, second_val+1): 337 | bit_object.update(({str(i): enum_description})) 338 | else: 339 | first = re.match(r'[ ]*([0-9bxXA-F]+)', line).groups()[0] 340 | 341 | if is_binary: 342 | first = re.sub(r'b', '', first) 343 | val = str(int(first, base=2)) 344 | elif 'x' in first.lower(): 345 | val = str(int(first, base=16)) 346 | else: 347 | val = str(int(first, base=10)) 348 | 349 | bit_object.update(({val: enum_description})) 350 | 351 | @staticmethod 352 | def is_spn_likely_bitmapped(spn_description): 353 | return len(J1939daConverter.get_enum_lines(spn_description.splitlines())) > 2 354 | 355 | def process_spns_and_pgns_tab(self, sheet): 356 | self.j1939db.update({'J1939PGNdb': OrderedDict()}) 357 | j1939_pgn_db = self.j1939db.get('J1939PGNdb') 358 | self.j1939db.update({'J1939SPNdb': OrderedDict()}) 359 | j1939_spn_db = self.j1939db.get('J1939SPNdb') 360 | self.j1939db.update({'J1939BitDecodings': OrderedDict()}) 361 | j1939_bit_decodings = self.j1939db.get('J1939BitDecodings') 362 | 363 | # check for SPNs in multiple PNGs 364 | spn_factcheck_map = dict() 365 | 366 | header_row, header_row_num = self.get_header_row(sheet) 367 | pgn_col = self.get_any_header_column(header_row, 'PGN') 368 | spn_col = self.get_any_header_column(header_row, 'SPN') 369 | acronym_col = self.get_any_header_column(header_row, 370 | ['ACRONYM', 'PG_ACRONYM']) 371 | pgn_label_col = self.get_any_header_column(header_row, 372 | ['PARAMETER_GROUP_LABEL', 'PG_LABEL']) 373 | pgn_data_length_col = self.get_any_header_column(header_row, 374 | ['PGN_DATA_LENGTH', 'PG_DATA_LENGTH']) 375 | transmission_rate_col = self.get_any_header_column(header_row, 'TRANSMISSION_RATE') 376 | spn_position_in_pgn_col = self.get_any_header_column(header_row, 377 | ['SPN_POSITION_IN_PGN','SP_POSITION_IN_PG']) 378 | spn_name_col = self.get_any_header_column(header_row, 379 | ['SPN_NAME', 'SP_LABEL']) 380 | offset_col = self.get_any_header_column(header_row, 'OFFSET') 381 | data_range_col = self.get_any_header_column(header_row, 'DATA_RANGE') 382 | resolution_col = self.get_any_header_column(header_row, 383 | ['RESOLUTION', 'SCALING']) 384 | spn_length_col = self.get_any_header_column(header_row, 385 | ['SPN_LENGTH', 'SP_LENGTH']) 386 | units_col = self.get_any_header_column(header_row, 387 | ['UNITS', 'UNIT']) 388 | operational_range_col = self.get_any_header_column(header_row, 'OPERATIONAL_RANGE') 389 | spn_description_col = self.get_any_header_column(header_row, 390 | ['SPN_DESCRIPTION', 'SP_DESCRIPTION']) 391 | 392 | for i in range(header_row_num+1, sheet.nrows): 393 | row = sheet.row_values(i) 394 | pgn = row[pgn_col] 395 | if pgn == '': 396 | continue 397 | 398 | pgn_label = str(int(pgn)) 399 | 400 | spn = row[spn_col] 401 | 402 | if not j1939_pgn_db.get(pgn_label) is None: 403 | # TODO assert that PGN values haven't changed across multiple SPN rows 404 | pass 405 | else: 406 | pgn_object = OrderedDict() 407 | 408 | pgn_data_len = self.get_pgn_data_len(row[pgn_data_length_col]) 409 | 410 | pgn_object.update({'Label': unidecode.unidecode(row[acronym_col])}) 411 | pgn_object.update({'Name': unidecode.unidecode(row[pgn_label_col])}) 412 | pgn_object.update({'PGNLength': pgn_data_len}) 413 | pgn_object.update({'Rate': unidecode.unidecode(row[transmission_rate_col])}) 414 | pgn_object.update({'SPNs': list()}) 415 | pgn_object.update({'SPNStartBits': list()}) 416 | pgn_object.update({'Temp_SPN_Order': list()}) 417 | 418 | j1939_pgn_db.update({pgn_label: pgn_object}) 419 | 420 | if pretty_j1939.describe.is_transport_pgn(int(pgn)): # skip all SPNs for transport PGNs 421 | continue 422 | 423 | if not spn == '': 424 | if spn_factcheck_map.get(spn, None) is None: 425 | spn_factcheck_map.update({spn: [pgn, ]}) 426 | else: 427 | spn_list = spn_factcheck_map.get(spn) 428 | spn_list.append(spn) 429 | spn_factcheck_map.update({spn: spn_list}) 430 | 431 | spn_label = str(int(spn)) 432 | spn_object = OrderedDict() 433 | 434 | spn_length = self.get_spn_len(row[spn_length_col]) 435 | if type(spn_length) == str and spn_length.startswith("Variable"): 436 | spn_delimiter = self.get_spn_delimiter(row[spn_length_col]) 437 | else: 438 | spn_delimiter = None 439 | 440 | spn_resolution = self.get_spn_resolution(row[resolution_col]) 441 | spn_units = self.get_spn_units(row[units_col], row[resolution_col]) 442 | data_range = unidecode.unidecode(row[data_range_col]) 443 | low, high = self.get_operational_hilo(data_range, spn_units, spn_length) 444 | 445 | spn_name = unidecode.unidecode(row[spn_name_col]) 446 | operational_range = unidecode.unidecode(row[operational_range_col]) 447 | spn_offset = self.get_spn_offset(row[offset_col]) 448 | 449 | spn_object.update({'DataRange': data_range}) 450 | spn_object.update({'Name': spn_name}) 451 | spn_object.update({'Offset': spn_offset}) 452 | spn_object.update({'OperationalHigh': high}) 453 | spn_object.update({'OperationalLow': low}) 454 | spn_object.update({'OperationalRange': operational_range}) 455 | spn_object.update({'Resolution': spn_resolution}) 456 | spn_object.update({'SPNLength': spn_length}) 457 | if spn_delimiter is not None: 458 | spn_object.update({'Delimiter': '0x%s' % spn_delimiter.hex()}) 459 | spn_object.update({'Units': spn_units}) 460 | 461 | existing_spn = j1939_spn_db.get(str(int(spn))) 462 | if existing_spn is not None and not existing_spn == spn_object: 463 | print("Warning: changed details of SPN %s:\n %s vs previous:\n %s" % 464 | (spn, existing_spn, spn_object), file=sys.stderr) 465 | else: 466 | j1939_spn_db.update({spn_label: spn_object}) 467 | 468 | # record SPN position-in-PGN ('StartBit') in the PGN structure along with the list of SPNs -- or skip 469 | # this SPN 470 | try: 471 | spn_position_contents = row[spn_position_in_pgn_col] 472 | spn_startbit_inpgn = self.get_spn_start_bit(spn_position_contents) 473 | if spn_label == '5998' and spn_position_contents.strip() == '4.4': # bug in 201311 DA 474 | spn_startbit_inpgn = self.get_spn_start_bit('4.5') 475 | elif spn_label == '3036' and spn_position_contents.strip() == '6-8.6': # bug in 201311 DA 476 | spn_startbit_inpgn = self.get_spn_start_bit('6-7,8.6') 477 | elif spn_label == '6062' and spn_position_contents.strip() == '4.4': # bug in 201311 DA 478 | spn_startbit_inpgn = self.get_spn_start_bit('4.5') 479 | elif spn_label == '6030' and spn_position_contents.strip() == '4.4': # bug in 201311 DA 480 | spn_startbit_inpgn = self.get_spn_start_bit('4.5') 481 | 482 | if spn_startbit_inpgn == [-1]: 483 | spn_order_inpgn = spn_position_contents.strip() 484 | else: 485 | spn_order_inpgn = spn_startbit_inpgn 486 | except ValueError: 487 | continue 488 | 489 | if spn_label == '6610' or spn_label == '6815': # bug in PGN map in 201311 DA 490 | continue 491 | 492 | # Back to PGN processing 493 | 494 | j1939_pgn_db.get(pgn_label).get('SPNs').append(int(spn)) 495 | # TODO strip consecutive startbits e.g. '[8, 16, 24]' for a 24bit val should be just '8' 496 | j1939_pgn_db.get(pgn_label).get('SPNStartBits').append([int(s) for s in spn_startbit_inpgn]) 497 | # the Temp_SPN_Order list will be deleted later 498 | j1939_pgn_db.get(pgn_label).get('Temp_SPN_Order').append(spn_order_inpgn) 499 | 500 | # If there is a bitfield/enum described in this row, then create a separate object describing the states 501 | spn_description = unidecode.unidecode(row[spn_description_col]) 502 | if row[units_col] == 'bit' or self.is_spn_likely_bitmapped(spn_description): 503 | bit_object = OrderedDict() 504 | self.create_bit_object_from_description(spn_description, bit_object) 505 | if len(bit_object) > 0: 506 | j1939_bit_decodings.update({spn_label: bit_object}) 507 | 508 | # Clean-ups are needed. The next steps are to do: 509 | # 1. sort SPN lists in PGNs by the Temp_SPN_Order 510 | # 2. fix the starting sequence of -1 startbits in PGNs with fixed-len SPNs mapped 511 | # 3. fix incorrectly variable-len SPNs in a sequence known startbits 512 | # 4. remove any SPN maps that have variable-len, no-delimiter SPNs in a PGN with >1 SPN mapped 513 | # 5. remove Temp_SPN_Order 514 | # 6. remove zero-len startbits arrays 515 | 516 | # * sort SPN lists in PGNs by the Temp_SPN_Order 517 | self.sort_spns_by_order(j1939_pgn_db) 518 | 519 | # * fix the starting sequence of -1 startbits in PGNs with fixed-len SPNs mapped 520 | self.remove_startbitsunknown_spns(j1939_pgn_db, j1939_spn_db) 521 | 522 | # * fix incorrectly variable-len SPNs in a sequence known startbits 523 | self.fix_omittedlen_spns(j1939_pgn_db, j1939_spn_db) 524 | 525 | # * remove any SPN maps that have variable-len, no-delimiter SPNs in a PGN with >1 SPN mapped 526 | self.remove_underspecd_spns(j1939_pgn_db, j1939_spn_db) 527 | 528 | # * remove Temp_SPN_Order 529 | for pgn, pgn_object in j1939_pgn_db.items(): 530 | pgn_object.pop('Temp_SPN_Order') 531 | 532 | # * remove zero-len startbits arrays 533 | for pgn, pgn_object in j1939_pgn_db.items(): 534 | spn_list = pgn_object.get('SPNs') 535 | if len(spn_list) == 0: 536 | pgn_object.pop('SPNStartBits') 537 | 538 | return 539 | 540 | def get_any_header_column(self, header_row, header_texts): 541 | if not isinstance(header_texts, list): 542 | header_texts = [header_texts] 543 | for t in header_texts: 544 | try: 545 | return header_row.index(t) 546 | except ValueError: 547 | continue 548 | return -1 549 | 550 | def get_header_row(self, sheet): 551 | header_row_num = self.lookup_header_row(sheet) 552 | 553 | header_row = sheet.row_values(header_row_num) 554 | header_row = list(map(lambda x: x.upper(), header_row)) 555 | header_row = list(map(lambda x: x.replace(' ', '_'), header_row)) 556 | return header_row, header_row_num 557 | 558 | def lookup_header_row(self, sheet): 559 | if sheet.row_values(0)[3].strip() == '': 560 | return 3 561 | else: 562 | return 0 563 | 564 | @staticmethod 565 | def fix_omittedlen_spns(j1939_pgn_db, j1939_spn_db): 566 | modified_spns = dict() 567 | for pgn, pgn_object in j1939_pgn_db.items(): 568 | spn_list = pgn_object.get('SPNs') 569 | spn_startbit_list = pgn_object.get('SPNStartBits') 570 | spn_order_list = pgn_object.get('Temp_SPN_Order') 571 | 572 | spn_in_pgn_list = list(zip(spn_list, spn_startbit_list, spn_order_list)) 573 | if J1939daConverter.all_spns_positioned(spn_startbit_list): 574 | for i in range(0, len(spn_in_pgn_list) - 1): 575 | here_startbit = int(spn_in_pgn_list[i][1][0]) 576 | next_startbit = int(spn_in_pgn_list[i + 1][1][0]) 577 | calced_spn_length = next_startbit - here_startbit 578 | here_spn = spn_in_pgn_list[i][0] 579 | 580 | if calced_spn_length == 0: 581 | print("Warning: calculated zero-length SPN %s in PGN %s" % (here_spn, pgn), file=sys.stderr) 582 | continue 583 | else: 584 | spn_obj = j1939_spn_db.get(str(here_spn)) 585 | current_spn_length = spn_obj.get('SPNLength') 586 | if J1939daConverter.is_length_variable(current_spn_length): 587 | spn_obj.update({'SPNLength': calced_spn_length}) 588 | modified_spns.update({here_spn: True}) 589 | elif calced_spn_length < current_spn_length and modified_spns.get(here_spn) is None: 590 | print("Warning: calculated length for SPN %s (%d) in PGN %s differs from existing SPN " 591 | "length %s" % (here_spn, calced_spn_length, pgn, current_spn_length), file=sys.stderr) 592 | 593 | @staticmethod 594 | def is_length_variable(spn_length): 595 | return type(spn_length) is str and spn_length.startswith('Variable') 596 | 597 | @staticmethod 598 | def remove_startbitsunknown_spns(j1939_pgn_db, j1939_spn_db): 599 | for pgn, pgn_object in j1939_pgn_db.items(): 600 | spn_list = pgn_object.get('SPNs') 601 | if len(spn_list) > 1: 602 | spn_list = pgn_object.get('SPNs') 603 | spn_startbit_list = pgn_object.get('SPNStartBits') 604 | spn_order_list = pgn_object.get('Temp_SPN_Order') 605 | 606 | spn_in_pgn_list = list(zip(spn_list, spn_startbit_list, spn_order_list)) 607 | for i in range(0, len(spn_in_pgn_list)): 608 | here_startbit = int(spn_in_pgn_list[i][1][0]) 609 | prev_spn = spn_in_pgn_list[i - 1][0] 610 | prev_spn_obj = j1939_spn_db.get(str(prev_spn)) 611 | prev_spn_len = prev_spn_obj.get('SPNLength') 612 | if here_startbit == -1 and not J1939daConverter.is_length_variable(prev_spn_len): 613 | if (i - 1) == 0: # special case for the first field 614 | prev_startbit = 0 615 | here_startbit = prev_spn_len 616 | prev_tuple = list(spn_in_pgn_list[i - 1]) 617 | prev_tuple[1] = [prev_startbit] 618 | spn_in_pgn_list[i - 1] = tuple(prev_tuple) 619 | else: 620 | prev_startbit = int(spn_in_pgn_list[i - 1][1][0]) 621 | here_startbit = prev_startbit + prev_spn_len 622 | here_tuple = list(spn_in_pgn_list[i]) 623 | here_tuple[1] = [here_startbit] 624 | spn_in_pgn_list[i] = tuple(here_tuple) 625 | 626 | # update the maps 627 | pgn_object.update({'SPNs': list(map(operator.itemgetter(0), spn_in_pgn_list))}) 628 | pgn_object.update({'SPNStartBits': list(map(operator.itemgetter(1), spn_in_pgn_list))}) 629 | pgn_object.update({'Temp_SPN_Order': list(map(operator.itemgetter(2), spn_in_pgn_list))}) 630 | 631 | @staticmethod 632 | def remove_underspecd_spns(j1939_pgn_db, j1939_spn_db): 633 | for pgn, pgn_object in j1939_pgn_db.items(): 634 | spn_list = pgn_object.get('SPNs') 635 | if len(spn_list) > 1: 636 | spn_list = pgn_object.get('SPNs') 637 | spn_startbit_list = pgn_object.get('SPNStartBits') 638 | spn_order_list = pgn_object.get('Temp_SPN_Order') 639 | 640 | spn_in_pgn_list = zip(spn_list, spn_startbit_list, spn_order_list) 641 | 642 | def should_remove(tup): 643 | spn = tup[0] 644 | spn_obj = j1939_spn_db.get(str(spn)) 645 | current_spn_length = spn_obj.get('SPNLength') 646 | current_spn_delimiter = spn_obj.get('Delimiter') 647 | if J1939daConverter.is_length_variable(current_spn_length) and \ 648 | current_spn_delimiter is None: 649 | print("Warning: removing SPN %s from PGN %s because it " 650 | "is variable-length with no delimiter in a multi-SPN PGN. " 651 | "This likely an under-specification in the DA." % (spn, pgn), file=sys.stderr) 652 | return True 653 | return False 654 | 655 | spn_in_pgn_list = [tup for tup in spn_in_pgn_list if not should_remove(tup)] 656 | 657 | # update the maps 658 | pgn_object.update({'SPNs': list(map(operator.itemgetter(0), spn_in_pgn_list))}) 659 | pgn_object.update({'SPNStartBits': list(map(operator.itemgetter(1), spn_in_pgn_list))}) 660 | pgn_object.update({'Temp_SPN_Order': list(map(operator.itemgetter(2), spn_in_pgn_list))}) 661 | 662 | @staticmethod 663 | def sort_spns_by_order(j1939_pgn_db): 664 | for pgn, pgn_object in j1939_pgn_db.items(): 665 | spn_list = pgn_object.get('SPNs') 666 | spn_startbit_list = pgn_object.get('SPNStartBits') 667 | spn_order_list = pgn_object.get('Temp_SPN_Order') 668 | 669 | spn_in_pgn_list = zip(spn_list, spn_startbit_list, spn_order_list) 670 | # sort numbers then letters 671 | spn_in_pgn_list = sorted(spn_in_pgn_list, key=lambda obj: (isinstance(obj[2], str), obj[2])) 672 | 673 | # update the maps (now sorted by 'Temp_SPN_Order') 674 | pgn_object.update({'SPNs': list(map(operator.itemgetter(0), spn_in_pgn_list))}) 675 | pgn_object.update({'SPNStartBits': list(map(operator.itemgetter(1), spn_in_pgn_list))}) 676 | pgn_object.update({'Temp_SPN_Order': list(map(operator.itemgetter(2), spn_in_pgn_list))}) 677 | 678 | @staticmethod 679 | def all_spns_positioned(spn_startbit_list): 680 | if len(spn_startbit_list) == 0: 681 | return True 682 | else: 683 | is_positioned = map(lambda spn_startbit: int(spn_startbit[0]) != -1, spn_startbit_list) 684 | return functools.reduce(lambda a, b: a and b, is_positioned) 685 | 686 | def process_any_source_addresses_sheet(self, sheet): 687 | if self.j1939db.get('J1939SATabledb') is None: 688 | self.j1939db.update({'J1939SATabledb': OrderedDict()}) 689 | j1939_sa_tabledb = self.j1939db.get('J1939SATabledb') 690 | 691 | header_row, header_row_num = self.get_header_row(sheet) 692 | 693 | source_address_id_col = self.get_any_header_column(header_row, 'SOURCE_ADDRESS_ID') 694 | name_col = self.get_any_header_column(header_row, 'NAME') 695 | 696 | for i in range(header_row_num+1, sheet.nrows): 697 | row = sheet.row_values(i) 698 | 699 | name = row[name_col] 700 | if name.startswith('thru') or name.startswith('through'): 701 | start_range = int(row[source_address_id_col]) 702 | range_clues = name.replace('thru', '').replace('through', '') 703 | range_clues = range_clues.strip() 704 | end_range = int(range_clues.split(' ')[0]) 705 | description = ''.join(name.split(str(end_range))[1:]).strip() 706 | description = description + ' ' + row[name_col + 1] 707 | description = re.sub(r'^are ', '', description) 708 | description = description.strip() 709 | for val in range(start_range, end_range + 1): 710 | j1939_sa_tabledb.update({str(val): description}) 711 | else: 712 | val = str(int(row[source_address_id_col])) 713 | name = name.strip() 714 | j1939_sa_tabledb.update({val: name}) 715 | return 716 | 717 | def convert(self, output_file): 718 | self.j1939db = OrderedDict() 719 | sheet_name = ['SPNs & PGNs', 'SPs & PGs'] 720 | self.process_spns_and_pgns_tab(self.find_first_sheet_by_name(sheet_name)) 721 | sheet_name = 'Global Source Addresses (B2)' 722 | self.process_any_source_addresses_sheet(self.find_first_sheet_by_name(sheet_name)) 723 | sheet_name = 'IG1 Source Addresses (B3)' 724 | self.process_any_source_addresses_sheet(self.find_first_sheet_by_name(sheet_name)) 725 | 726 | out = open(output_file, 'w') if output_file != '-' else sys.stdout 727 | 728 | try: 729 | out.write(json.dumps(self.j1939db, indent=2, sort_keys=False)) 730 | except BrokenPipeError: 731 | pass 732 | 733 | if out is not sys.stdout: 734 | out.close() 735 | 736 | return 737 | 738 | def find_first_sheet_by_name(self, sheet_names): 739 | if not isinstance(sheet_names, list): 740 | sheet_names = [sheet_names] 741 | for sheet_name in sheet_names: 742 | for book in self.digital_annex_xls_list: 743 | if sheet_name in book.sheet_names(): 744 | sheet = book.sheet_by_name(sheet_name) 745 | return sheet 746 | return None 747 | 748 | 749 | all_inputs = itertools.chain(*args.digital_annex_xls) 750 | J1939daConverter(all_inputs).convert(args.write_json) 751 | -------------------------------------------------------------------------------- /pretty_j1939.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import bitstring 4 | import argparse 5 | import sys 6 | import json 7 | 8 | import pretty_j1939.describe 9 | 10 | 11 | parser = argparse.ArgumentParser(description='pretty-printing J1939 candump logs') 12 | parser.add_argument('candump', help='candump log, use - for stdin') 13 | 14 | parser.add_argument('--da-json', type=str, const=True, default=pretty_j1939.describe.DEFAULT_DA_JSON, nargs='?', 15 | help='absolute path to the input JSON DA (default=\"./J1939db.json\")') 16 | 17 | parser.add_argument('--candata', dest='candata', action='store_true', help='print input can data') 18 | parser.add_argument('--no-candata', dest='candata', action='store_false', help='(default)') 19 | parser.set_defaults(candata=pretty_j1939.describe.DEFAULT_CANDATA) 20 | 21 | parser.add_argument('--pgn', dest='pgn', action='store_true', help='(default) print source/destination/type ' 22 | 'description') 23 | parser.add_argument('--no-pgn', dest='pgn', action='store_false') 24 | parser.set_defaults(pgn=pretty_j1939.describe.DEFAULT_PGN) 25 | 26 | parser.add_argument('--spn', dest='spn', action='store_true', help='(default) print signals description') 27 | parser.add_argument('--no-spn', dest='spn', action='store_false') 28 | parser.set_defaults(spn=pretty_j1939.describe.DEFAULT_SPN) 29 | 30 | parser.add_argument('--transport', dest='transport', action='store_true', help='print details of transport-layer ' 31 | 'streams found (default)') 32 | parser.add_argument('--no-transport', dest='transport', action='store_false', help='') 33 | parser.set_defaults(transport=pretty_j1939.describe.DEFAULT_TRANSPORT) 34 | 35 | parser.add_argument('--link', dest='link', action='store_true', help='print details of link-layer frames found') 36 | parser.add_argument('--no-link', dest='link', action='store_false', help='(default)') 37 | parser.set_defaults(link=pretty_j1939.describe.DEFAULT_LINK) 38 | 39 | parser.add_argument('--include-na', dest='include_na', action='store_true', help='include not-available (0xff) SPN ' 40 | 'values') 41 | parser.add_argument('--no-include-na', dest='include_na', action='store_false', help='(default)') 42 | parser.set_defaults(include_na=pretty_j1939.describe.DEFAULT_INCLUDE_NA) 43 | 44 | parser.add_argument('--real-time', dest='real_time', action='store_true', help='emit SPNs as they are seen in ' 45 | 'transport sessions') 46 | parser.add_argument('--no-real-time', dest='real_time', action='store_false', help='(default)') 47 | parser.set_defaults(real_time=pretty_j1939.describe.DEFAULT_REAL_TIME) 48 | 49 | parser.add_argument('--format', dest='format', action='store_true', help='format each structure (otherwise ' 50 | 'single-line)') 51 | parser.add_argument('--no-format', dest='format', action='store_false', help='(default)') 52 | parser.set_defaults(format=False) 53 | 54 | 55 | args = parser.parse_args() 56 | 57 | 58 | def process_lines(candump_file): 59 | for candump_line in candump_file.readlines(): 60 | if candump_line == '\n': 61 | continue 62 | 63 | try: 64 | message = candump_line.split()[2] 65 | message_id = bitstring.ConstBitArray(hex=message.split('#')[0]) 66 | message_data = bitstring.ConstBitArray(hex=message.split('#')[1]) 67 | except (IndexError, ValueError): 68 | print("Warning: error in line '%s'" % candump_line, file=sys.stderr) 69 | continue 70 | 71 | desc_line = '' 72 | 73 | description = describe(message_data.bytes, message_id.uint) 74 | if args.format: 75 | json_description = str(json.dumps(description, indent=4)) 76 | else: 77 | json_description = str(json.dumps(description, separators=(',', ':'))) 78 | if len(description) > 0: 79 | desc_line = desc_line + json_description 80 | 81 | if args.candata: 82 | can_line = candump_line.rstrip() + " ; " 83 | if not args.format: 84 | desc_line = can_line + desc_line 85 | else: 86 | formatted_lines = desc_line.splitlines() 87 | if len(formatted_lines) == 0: 88 | desc_line = can_line 89 | else: 90 | first_line = formatted_lines[0] 91 | desc_line = can_line + first_line 92 | formatted_lines.remove(first_line) 93 | 94 | for line in formatted_lines: 95 | desc_line = desc_line + '\n' + ' ' * len(candump_line) + "; " + line 96 | 97 | if len(desc_line) > 0: 98 | print(desc_line) 99 | 100 | 101 | if __name__ == '__main__': 102 | describe = pretty_j1939.describe.get_describer( 103 | da_json=args.da_json, 104 | describe_pgns=args.pgn, 105 | describe_spns=args.spn, 106 | describe_link_layer=args.link, 107 | describe_transport_layer=args.transport, 108 | real_time=args.real_time, 109 | include_transport_rawdata=args.candata, 110 | include_na=args.include_na) 111 | if args.candump == '-': 112 | f = sys.stdin 113 | else: 114 | f = open(args.candump, 'r') 115 | process_lines(f) 116 | -------------------------------------------------------------------------------- /pretty_j1939/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nmfta-repo/pretty_j1939/4e5c497bfd12ba89fb95b4711309603e5a1e586d/pretty_j1939/__init__.py -------------------------------------------------------------------------------- /pretty_j1939/describe.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2019 National Motor Freight Traffic Association Inc. All Rights Reserved. 3 | # See the file "LICENSE" for the full license governing this code. 4 | # 5 | 6 | import json 7 | import bitstring 8 | import sys 9 | import math 10 | from collections import OrderedDict 11 | 12 | PGN_LABEL = 'PGN' 13 | 14 | NA_NAN = float('nan') 15 | EMPTY_BITS = bitstring.ConstBitArray(bytes=b'') 16 | 17 | DA_MASK = 0x0000FF00 18 | SA_MASK = 0x000000FF 19 | PF_MASK = 0x00FF0000 20 | TM_MASK = 0x00EB0000 21 | CM_MASK = 0x00EC0000 22 | ACK_MASK = 0x0E80000 23 | 24 | 25 | class DADescriber: 26 | pgn_objects = dict() 27 | spn_objects = dict() 28 | address_names = dict() 29 | bit_encodings = dict() 30 | 31 | def __init__(self, da_json, describe_pgns, describe_spns, describe_link_layer, describe_transport_layer, 32 | real_time, include_transport_rawdata, include_na): 33 | with open(da_json, 'r') as j1939_file: 34 | j1939db = json.load(j1939_file) 35 | for pgn_label, pgn_object in j1939db['J1939PGNdb'].items(): 36 | # TODO check for all expected fields on each object 37 | self.pgn_objects.update({int(pgn_label): pgn_object}) 38 | 39 | for spn_label, spn_object in j1939db['J1939SPNdb'].items(): 40 | # TODO check for all expected fields on each object 41 | self.spn_objects.update({int(spn_label): spn_object}) 42 | 43 | for address, address_name in j1939db['J1939SATabledb'].items(): 44 | # TODO check for all expected fields on each object 45 | self.address_names.update({int(address): address_name}) 46 | 47 | for spn_label, bit_encoding in j1939db['J1939BitDecodings'].items(): 48 | # TODO check for all expected fields on each object 49 | self.bit_encodings.update({int(spn_label): bit_encoding}) 50 | self.da_json = da_json 51 | self.describe_pgns = describe_pgns 52 | self.describe_spns = describe_spns 53 | self.describe_link_layer = describe_link_layer 54 | self.describe_transport_layer = describe_transport_layer 55 | self.real_time = real_time 56 | self.include_transport_rawdata = include_transport_rawdata 57 | self.include_na = include_na 58 | 59 | def get_pgn_acronym(self, pgn): 60 | pgn_object = self.pgn_objects.get(pgn) 61 | if pgn_object is None: 62 | return "Unknown" 63 | acronym = pgn_object["Label"] 64 | if acronym == '': 65 | acronym = "Unknown" 66 | return acronym 67 | 68 | def get_spn_name(self, spn): 69 | spn_object = self.spn_objects.get(spn) 70 | if spn_object is None: 71 | return "Unknown" 72 | return spn_object["Name"] 73 | 74 | def get_formatted_address_and_name(self, address): 75 | if address == 255: 76 | formatted_address = "(255)" 77 | address_name = "All" 78 | else: 79 | formatted_address = "({:3d})".format(address) 80 | address_name = self.address_names.get(address) 81 | if address_name is None: 82 | address_name = "Unknown" 83 | return formatted_address, address_name 84 | 85 | def describe_message_id(self, message_id): 86 | description = {} 87 | 88 | pgn, da, sa = parse_j1939_id(message_id) 89 | da_formatted_address, da_address_name = self.get_formatted_address_and_name(da) 90 | sa_formatted_address, sa_address_name = self.get_formatted_address_and_name(sa) 91 | 92 | description['PGN'] = self.get_pgn_description(pgn) 93 | description['DA'] = "%s%s" % (da_address_name, da_formatted_address) 94 | description['SA'] = "%s%s" % (sa_address_name, sa_formatted_address) 95 | return description 96 | 97 | def get_pgn_description(self, pgn): 98 | pgn_acronym = self.get_pgn_acronym(pgn) 99 | pgn_description = "%s(%s)" % (pgn_acronym, pgn) 100 | return pgn_description 101 | 102 | def lookup_all_spn_params(self, _, spn, pgn): 103 | # look up items in the database 104 | name = self.get_spn_name(spn) 105 | spn_object = self.spn_objects.get(spn, {}) 106 | units = spn_object["Units"] 107 | spn_length = spn_object["SPNLength"] 108 | offset = spn_object["Offset"] 109 | 110 | spn_start = self.lookup_spn_startbit(spn_object, spn, pgn) 111 | 112 | scale = spn_object["Resolution"] 113 | if scale <= 0: 114 | scale = 1 115 | 116 | spn_end = spn_start + spn_length - 1 117 | 118 | return name, offset, scale, spn_end, spn_length, spn_start, units 119 | 120 | def lookup_spn_startbit(self, spn_object, spn, pgn): 121 | # support earlier versions of J1939db.json which did not include PGN-to-SPN mappings at the PGN 122 | spn_start = spn_object.get("StartBit") 123 | if spn_start is None: # otherwise, try to use the SPN bit position information at the PGN 124 | pgn_object = self.pgn_objects.get(pgn, {}) 125 | spns_in_pgn = pgn_object["SPNs"] 126 | startbits_in_pgn = pgn_object["SPNStartBits"] 127 | spn_start = startbits_in_pgn[spns_in_pgn.index(spn)] 128 | 129 | # finally, support earlier versions of J1939db.json which did not include multi-startbit SPNs 130 | if not type(spn_start) is list: 131 | spn_start = [spn_start] 132 | 133 | return spn_start 134 | 135 | def get_spn_bytes(self, message_data_bitstring, spn, pgn, is_complete_message): 136 | spn_object = self.spn_objects.get(spn, {}) 137 | spn_length = spn_object["SPNLength"] 138 | spn_start = self.lookup_spn_startbit(spn_object, spn, pgn) 139 | 140 | if type(spn_length) is str and spn_length.startswith("Variable"): 141 | delimiter = spn_object.get("Delimiter") 142 | pgn_object = self.pgn_objects.get(pgn, {}) 143 | spn_list = pgn_object["SPNs"] 144 | if delimiter is None: 145 | if len(spn_list) == 1: 146 | if is_complete_message: 147 | return get_spn_cut_bytes(spn_start, len(message_data_bitstring.bytes) * 8, 148 | message_data_bitstring, is_complete_message) 149 | else: 150 | return EMPTY_BITS 151 | else: 152 | print("Warning: skipping SPN %d in non-delimited and multi-spn and variable-length PGN %d" 153 | " (this is most-likely a problem in the JSONdb or source DA)" % (spn, pgn), file=sys.stderr) 154 | return EMPTY_BITS # no way to handle multi-spn messages without a delimiter 155 | else: 156 | spn_ordinal = spn_list.index(spn) 157 | 158 | delimiter = delimiter.replace('0x', '') 159 | delimiter = bytes.fromhex(delimiter) 160 | spn_fields = message_data_bitstring.bytes.split(delimiter) 161 | 162 | if not is_complete_message and len(spn_fields) == 1: # delimiter is not found 163 | return EMPTY_BITS 164 | 165 | if spn_start != [-1]: # variable-len field with defined start; must be first variable-len field 166 | spn_end = len(spn_fields[0]) * 8 - 1 167 | cut_data = bitstring.Bits(bytes=spn_fields[0])[spn_start[0]:spn_end + 1] 168 | return cut_data 169 | else: # variable-len field with unspecified start; requires field counting 170 | startbits_list = pgn_object["SPNStartBits"] 171 | num_fixedlen_spn_fields = sum(1 for s in startbits_list if s != -1) 172 | variable_spn_ordinal = spn_ordinal - num_fixedlen_spn_fields 173 | if num_fixedlen_spn_fields > 0: 174 | variable_spn_fields = spn_fields[1:] 175 | else: 176 | variable_spn_fields = spn_fields 177 | try: 178 | cut_data = bitstring.Bits(bytes=variable_spn_fields[variable_spn_ordinal]) 179 | except IndexError: 180 | cut_data = EMPTY_BITS 181 | return cut_data 182 | else: 183 | return get_spn_cut_bytes(spn_start, spn_length, message_data_bitstring, is_complete_message) 184 | 185 | # returns a float in units of the SPN, or NaN if the value of the SPN value is not available in the message_data, or 186 | # None if the message is incomplete and SPN data is not available. 187 | # if validate == True, raises a ValueError if the value is present in message_data but is beyond the operational 188 | # range 189 | def get_spn_value(self, message_data_bitstring, spn, pgn, is_complete_message, validate=True): 190 | spn_object = self.spn_objects.get(spn, {}) 191 | units = spn_object["Units"] 192 | 193 | offset = spn_object["Offset"] 194 | scale = spn_object["Resolution"] 195 | if scale <= 0: 196 | scale = 1 197 | 198 | cut_data = bitstring.BitArray(self.get_spn_bytes(message_data_bitstring, spn, pgn, is_complete_message)) 199 | if (not is_complete_message) and cut_data.length == 0: # incomplete SPN 200 | return None 201 | 202 | if cut_data.all(True): # value unavailable in message_data 203 | return NA_NAN 204 | 205 | cut_data.byteswap() 206 | if is_spn_bitencoded(units): 207 | value = cut_data.uint 208 | else: 209 | value = cut_data.uint * scale + offset 210 | 211 | if validate: 212 | operational_min = spn_object["OperationalLow"] 213 | operational_max = spn_object["OperationalHigh"] 214 | if value < operational_min or value > operational_max: 215 | raise ValueError 216 | 217 | return value 218 | 219 | def describe_message_data(self, pgn, message_data_bitstring, is_complete_message=True, skip_spns=None): 220 | if skip_spns is None: # TODO have one default for skip_spns 221 | skip_spns = {} 222 | description = OrderedDict() 223 | if is_transport_pgn(pgn): # transport messages can't be accurately parsed by the DA description 224 | return description 225 | 226 | pgn_object = self.pgn_objects.get(pgn, {}) 227 | for spn in pgn_object.get("SPNs", []): 228 | if skip_spns.get(spn, ()) != (): # skip any SPNs that have already been processed. 229 | continue 230 | spn_name = self.get_spn_name(spn) 231 | spn_units = self.spn_objects.get(spn)["Units"] 232 | 233 | def mark_spn_covered(new_spn, new_spn_name, new_spn_description): 234 | skip_spns[new_spn] = (new_spn_name, new_spn_description) # TODO: move this closer to real-time handling 235 | 236 | def add_spn_description(new_spn, new_spn_name, new_spn_description): 237 | description[new_spn_name] = new_spn_description 238 | mark_spn_covered(new_spn, new_spn_name, new_spn_description) 239 | 240 | try: 241 | if is_spn_numerical_values(spn_units): 242 | spn_value = self.get_spn_value(message_data_bitstring, spn, pgn, is_complete_message) 243 | if (not is_complete_message) and (spn_value is None): # incomplete message 244 | continue 245 | elif math.isnan(spn_value): 246 | if self.include_na: 247 | add_spn_description(spn, spn_name, "N/A") 248 | else: 249 | mark_spn_covered(spn, spn_name, "N/A") 250 | elif is_spn_bitencoded(spn_units): 251 | try: 252 | enum_descriptions = self.bit_encodings.get(spn) 253 | if enum_descriptions is None: 254 | add_spn_description(spn, spn_name, "%d (Unknown)" % spn_value) 255 | continue 256 | spn_value_description = enum_descriptions[str(int(spn_value))].strip() 257 | add_spn_description(spn, spn_name, "%d (%s)" % (spn_value, spn_value_description)) 258 | except KeyError: 259 | add_spn_description(spn, spn_name, "%d (Unknown)" % spn_value) 260 | else: 261 | add_spn_description(spn, spn_name, "%s [%s]" % (spn_value, spn_units)) 262 | else: 263 | spn_bytes = self.get_spn_bytes(message_data_bitstring, spn, pgn, is_complete_message) 264 | if spn_bytes.length == 0 and not is_complete_message: # incomplete message 265 | continue 266 | else: 267 | if spn_units.lower() in ("request dependent",): 268 | add_spn_description(spn, spn_name, "%s (%s)" % (spn_bytes, spn_units)) 269 | elif spn_units.lower() in ("ascii",): 270 | add_spn_description(spn, spn_name, "%s" % spn_bytes.bytes.decode(encoding="utf-8")) 271 | else: 272 | add_spn_description(spn, spn_name, "%s" % spn_bytes) 273 | 274 | except ValueError: 275 | add_spn_description(spn, spn_name, "%s (%s)" % ( 276 | self.get_spn_bytes(message_data_bitstring, spn, pgn, is_complete_message), "Out of range")) 277 | 278 | return description 279 | 280 | 281 | def parse_j1939_id(can_id): 282 | sa = SA_MASK & can_id 283 | pf = (PF_MASK & can_id) >> 16 284 | da = (DA_MASK & can_id) >> 8 285 | 286 | if pf >= 240: # PDU2 format 287 | pgn = pf * 256 + da 288 | da = 0xFF 289 | else: 290 | pgn = pf * 256 291 | return pgn, da, sa 292 | 293 | 294 | def is_connection_management_message(message_id): 295 | return (message_id & PF_MASK) == CM_MASK 296 | 297 | 298 | def is_connection_management_pgn(pgn): 299 | return pgn == CM_MASK >> 8 300 | 301 | 302 | def is_data_transfer_message(message_id): 303 | return (message_id & PF_MASK) == TM_MASK 304 | 305 | 306 | def is_data_transfer_pgn(pgn): 307 | return pgn == TM_MASK >> 8 308 | 309 | 310 | def is_ack_message(message_id): 311 | return (message_id & PF_MASK) == ACK_MASK 312 | 313 | 314 | def is_ack_pgn(pgn): 315 | return pgn == ACK_MASK >> 8 316 | 317 | 318 | def is_transport_message(message_id): 319 | return is_data_transfer_message(message_id) or \ 320 | is_connection_management_message(message_id) or \ 321 | is_ack_message(message_id) 322 | 323 | 324 | def is_transport_pgn(pgn): 325 | return is_data_transfer_pgn(pgn) or is_connection_management_pgn(pgn) or is_ack_pgn(pgn) 326 | 327 | 328 | def is_bam_rts_cts_message(message_bytes): 329 | return (message_bytes[0] == 32 or 330 | message_bytes[0] == 16) 331 | 332 | 333 | def get_spn_cut_bytes(spn_start, spn_length, message_data_bitstring, is_complete_message): 334 | spn_end = spn_start[0] + spn_length - 1 335 | if not is_complete_message and spn_end > message_data_bitstring.length: 336 | return bitstring.Bits(bytes=b'') 337 | 338 | cut_data = message_data_bitstring[spn_start[0]:spn_end + 1] 339 | if len(spn_start) > 1: 340 | lsplit = int(spn_start[1] / 8) * 8 - spn_start[0] 341 | rsplit = spn_length - lsplit 342 | b = bitstring.BitArray(message_data_bitstring[spn_start[0]:spn_start[0] + lsplit]) 343 | b.append(message_data_bitstring[spn_start[1]:spn_start[1] + rsplit]) 344 | cut_data = b 345 | return cut_data 346 | 347 | 348 | def is_spn_bitencoded(spn_units): 349 | return spn_units.lower() in ("bit", "binary",) 350 | 351 | 352 | def is_spn_numerical_values(spn_units): 353 | norm_units = spn_units.lower() 354 | return norm_units not in ("manufacturer determined", "byte", "", "request dependent", "ascii") 355 | 356 | 357 | class TransportTracker: 358 | new_pgn = {} 359 | new_data = {} 360 | new_count = {} 361 | new_length = {} 362 | spn_coverage = {} 363 | 364 | def __init__(self, real_time): 365 | self.is_real_time = real_time 366 | 367 | def process(self, transport_found_processor, message_bytes, message_id): 368 | _, da, sa = parse_j1939_id(message_id) 369 | if is_connection_management_message(message_id) and is_bam_rts_cts_message(message_bytes): # track new conn 370 | self.new_pgn[(da, sa)] = (message_bytes[7] << 16) + (message_bytes[6] << 8) + message_bytes[5] 371 | self.new_length[(da, sa)] = (message_bytes[2] << 8) + message_bytes[1] 372 | self.new_count[(da, sa)] = message_bytes[3] 373 | self.new_data[(da, sa)] = [None for _ in range(7 * self.new_count[(da, sa)])] 374 | elif is_data_transfer_message(message_id): 375 | if (da, sa) in self.new_data.keys(): 376 | packet_number = message_bytes[0] 377 | for b, i in zip(message_bytes[1:], range(7)): 378 | try: 379 | self.new_data[(da, sa)][7 * (packet_number - 1) + i] = b 380 | except Exception as e: 381 | print(e) 382 | is_last_packet = packet_number == self.new_count[(da, sa)] 383 | 384 | if self.is_real_time: 385 | data_bytes = self.new_data[(da, sa)][0:packet_number * 7] 386 | if None not in data_bytes: 387 | data_bytes = bytes(data_bytes) 388 | transport_found_processor(data_bytes, sa, self.new_pgn[(da, sa)], 389 | spn_coverage=self.spn_coverage, 390 | is_last_packet=is_last_packet) 391 | elif is_last_packet: 392 | data_bytes = self.new_data[(da, sa)][0:self.new_length[(da, sa)]] 393 | if None not in data_bytes: 394 | data_bytes = bytes(data_bytes) 395 | transport_found_processor(data_bytes, sa, self.new_pgn[(da, sa)], 396 | is_last_packet=True) 397 | 398 | 399 | class J1939Describer: 400 | da_describer: DADescriber = None 401 | transport_tracker: TransportTracker = None 402 | 403 | def __init__(self, describe_link_layer, describe_pgns, describe_spns, describe_transport_layer, 404 | include_transport_rawdata, include_na): 405 | self.describe_link_layer = describe_link_layer 406 | self.describe_pgns = describe_pgns 407 | self.describe_spns = describe_spns 408 | self.describe_transport_layer = describe_transport_layer 409 | self.include_transport_rawdata = include_transport_rawdata 410 | self.include_na = include_na 411 | 412 | self.transport_messages = list() 413 | 414 | def set_da_describer(self, da_describer): 415 | self.da_describer = da_describer 416 | 417 | def set_transport_tracker(self, transport_tracker): 418 | self.transport_tracker = transport_tracker 419 | 420 | def __call__(self, message_data_bytes: bitstring.Bits, message_id_uint: bitstring.Bits): 421 | self.transport_messages.clear() 422 | 423 | def on_transport_found(data_bytes, found_sa, found_pgn, spn_coverage=None, is_last_packet=False): 424 | if spn_coverage is None: 425 | spn_coverage = {} 426 | transport_found = dict() 427 | transport_found[PGN_LABEL] = found_pgn 428 | transport_found['SA'] = found_sa 429 | transport_found['data'] = data_bytes 430 | transport_found['spn_coverage'] = spn_coverage 431 | transport_found['is_last_packet'] = is_last_packet 432 | self.transport_messages.append(transport_found) 433 | 434 | is_transport_lower_layer_message = is_transport_message(message_id_uint) 435 | if is_transport_lower_layer_message and self.describe_transport_layer: 436 | self.transport_tracker.process(on_transport_found, message_data_bytes, message_id_uint) 437 | 438 | description = OrderedDict() 439 | 440 | is_describe_this_frame = (not is_transport_lower_layer_message) # always print 'complete' J1939 frames 441 | is_describe_this_frame |= is_transport_lower_layer_message \ 442 | and self.describe_link_layer # or others when configured to describe 'link layer' frames 443 | 444 | if is_describe_this_frame: 445 | if self.describe_pgns: 446 | description.update(self.da_describer.describe_message_id(message_id_uint)) 447 | 448 | if self.describe_spns: 449 | pgn, _, _ = parse_j1939_id(message_id_uint) 450 | message_description = self.da_describer. \ 451 | describe_message_data(pgn, 452 | bitstring.Bits(bytes=message_data_bytes), 453 | is_complete_message=True) 454 | description.update(message_description) 455 | 456 | if self.describe_transport_layer and len(self.transport_messages) > 0: 457 | transport_message = self.transport_messages[0] 458 | transport_pgn = transport_message[PGN_LABEL] 459 | if self.describe_pgns: 460 | description.update(self.da_describer.describe_message_id(message_id_uint)) 461 | transport_pgn_description = self.da_describer.get_pgn_description(transport_pgn) 462 | if self.describe_link_layer: # when configured to describe 'link layer' don't collide with PGN 463 | description.update({'Transport PGN': transport_pgn_description}) 464 | else: # otherwise (and default) describe the transport PGN as _the_ PGN 465 | description.update({PGN_LABEL: transport_pgn_description}) 466 | 467 | is_complete_message = transport_message['is_last_packet'] 468 | if self.describe_spns: 469 | pgn = transport_pgn 470 | message_description = self.da_describer. \ 471 | describe_message_data(pgn, 472 | bitstring.Bits(bytes=transport_message['data']), 473 | is_complete_message=is_complete_message, 474 | skip_spns=transport_message['spn_coverage']) 475 | description.update(message_description) 476 | 477 | if is_complete_message and self.include_transport_rawdata: 478 | transport_data = str(bitstring.Bits(bytes=transport_message['data'])) 479 | description.update({'Transport Data': transport_data}) 480 | 481 | return description 482 | 483 | 484 | DEFAULT_DA_JSON = "J1939db.json" 485 | DEFAULT_CANDATA = False 486 | DEFAULT_PGN = True 487 | DEFAULT_SPN = True 488 | DEFAULT_TRANSPORT = True 489 | DEFAULT_LINK = False 490 | DEFAULT_INCLUDE_NA = False 491 | DEFAULT_REAL_TIME = False 492 | 493 | 494 | def get_describer(da_json=DEFAULT_DA_JSON, 495 | describe_pgns=DEFAULT_PGN, 496 | describe_spns=DEFAULT_SPN, 497 | describe_link_layer=DEFAULT_LINK, 498 | describe_transport_layer=DEFAULT_TRANSPORT, 499 | real_time=DEFAULT_REAL_TIME, 500 | include_transport_rawdata=DEFAULT_CANDATA, # TODO: separate show transport data from candata 501 | include_na=DEFAULT_INCLUDE_NA): 502 | describer = J1939Describer(describe_pgns=describe_pgns, 503 | describe_spns=describe_spns, 504 | describe_link_layer=describe_link_layer, 505 | describe_transport_layer=describe_transport_layer, 506 | include_transport_rawdata=include_transport_rawdata, 507 | include_na=include_na) 508 | 509 | transport_tracker = TransportTracker(real_time=real_time) 510 | describer.set_transport_tracker(transport_tracker) 511 | 512 | da_describer = DADescriber(da_json=da_json, 513 | describe_pgns=describe_pgns, 514 | describe_spns=describe_spns, 515 | describe_link_layer=describe_link_layer, 516 | describe_transport_layer=describe_transport_layer, 517 | real_time=real_time, 518 | include_transport_rawdata=include_transport_rawdata, 519 | include_na=include_na) 520 | describer.set_da_describer(da_describer) 521 | 522 | return describer 523 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open("README.md", "r") as readme: 4 | long_description = readme.read() 5 | 6 | setuptools.setup( 7 | name="pretty_j1939", 8 | version="0.0.2", 9 | author='"Ben Gardiner ", "Jeremy Daily ", Subhojeet Mukherjee ', 10 | author_email='ben.gardiner@nmfta.org', 11 | description="python libs and scripts for pretty-printing J1939 logs", 12 | long_description=long_description, 13 | long_description_content_type="text/markdown", 14 | url="https://github.com/nmfta-repo/pretty_j1939", 15 | packages=setuptools.find_packages(), 16 | install_requires=[ 17 | 'asteval', 18 | 'defusedxml', 19 | 'unidecode', 20 | 'xlrd', 21 | 'bitstring<4.0', 22 | ], 23 | scripts=[ 24 | 'create_j1939db-json.py', 25 | 'pretty_j1939.py', 26 | ], 27 | classifiers=[ 28 | "Programming Language :: Python :: 3", 29 | "Operating System :: OS Independent", 30 | ], 31 | ) 32 | -------------------------------------------------------------------------------- /testme.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | function die 3 | { 4 | echo $* 5 | exit 1 6 | } 7 | 8 | for i in tmp/*.xls; do 9 | python3 create_j1939db-json.py -f ${i} -w ${i/.xls/.json} || die 10 | done 11 | 12 | while read args; do 13 | for da in tmp/*.json; do 14 | for log in tmp/*.log; do 15 | python3 pretty_j1939.py $args --da-json $da $log > /dev/null || die 16 | head $log | python3 pretty_j1939.py $args --da-json $da - > /dev/null || die 17 | done 18 | done 19 | done <