├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── analysis.py ├── csvexport.py ├── hantek1008.py ├── utils ├── __init__.py ├── common.py ├── csvwriter.py └── electro.py └── zoscf_log.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hantek1008Driver 2 | 3 | This project provides tooling for using Hantek 1008 USB-oscilloscopes 4 | without proprietary software on Linux and Windows (not tested). 5 | You can include the Hantek1008 class from 'hantek1008.py' in your project 6 | to get access to the features of the device. 7 | Alternatively use the csvexport.py Python application to gain data and save it to a file. 8 | 9 | This project is based on careful reverse engineering of the device's USB protocol. 10 | The reverse engineering was only done to the extent necessary to obtain data for my master's thesis and 11 | does not cover all the features and configuration options of the device. 12 | Therefore, no guarantees can be made as to the reliability or accuracy of the data collected. 13 | 14 | ### Usageexample of csvexport.py: 15 | `python3 csvexport.py mydata.csv -s 1 2` 16 | This will write the measured data of channel 1 and 2 to 'mydata.csv' until you press CTRL+C to stop the measurement. 17 | 18 | ### Help Options: 19 | `python3 csvexport.py --help` 20 | This will show you all the available options/parameters and explains them in-depth. 21 | 22 | ### Notes: 23 | * Requires Python >= 3.6 24 | * Requires *pyusb* and *overrides* (install it using pip: `pip3 install pyusb overrides`) 25 | * If the software can not access the usb device because of lacking accessright, do the following (tested on linux/fedora): 26 | 1. Create file "/etc/udev/rules.d/99-hantek1008.rules" with content: 27 | ACTION=="add", SUBSYSTEM=="usb", ATTRS{idVendor}=="0783", ATTR{idProduct}=="5725", MODE="0666" 28 | 2. Then `sudo udevadm control -R` 29 | 3. Replug the device 30 | * The code contains many assert statements. 31 | They exist because I noticed at the time that the respective responses on my device were always the same. 32 | I was not able (nor was there any need) to find out what these responses meant, 33 | but I wanted to be notified if the response changed for any reason, hence the assert statements. 34 | With a different copy of the device, you might get different answers. So some asserts may fail. 35 | Therefore, it might be necessary to remove or adapt these assert statements. 36 | 37 | ### Contribution 38 | 39 | This project is open for contributions. 40 | Unfortunately, I have no longer access to a Hantek 1008 device, 41 | so I cannot test contributions. 42 | Also giving support in case of a problem is difficult under these 43 | circumstances, but I will try my best. -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mfg92/hantek1008py/302e45ad9d47dafec8a57c36aaca0b76647b221d/__init__.py -------------------------------------------------------------------------------- /analysis.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | from typing import List, Dict, Tuple, TextIO, Generator, Optional 5 | import numpy 6 | from utils.common import parse_csv_lines, open_csv_file 7 | import utils.electro as electro 8 | import utils.common as common 9 | import math 10 | import re 11 | from collections import namedtuple 12 | from utils.csvwriter import CsvWriter 13 | import sys 14 | 15 | assert sys.version_info >= (3, 6) 16 | 17 | 18 | VoltAmpChPair = namedtuple("VoltAmpChPair", ["voltage_ch", "ampere_ch", "name"]) 19 | 20 | 21 | def main() -> None: 22 | 23 | def va_pair_type(value: str) -> VoltAmpChPair: 24 | # vaild str eg 1:4 25 | match = re.match(r"(\d):(\d):([A-Za-z0-9_]+)", value) 26 | if not match: 27 | raise argparse.ArgumentTypeError(f"Invalid syntax. Voltage-Ampere-Pairs have to be in " 28 | f"the format: v:a:NAME eg. 1:4:L1") 29 | 30 | volt_amp_ch_pair = VoltAmpChPair(int(match.group(1)), 31 | int(match.group(2)), 32 | match.group(3)) 33 | 34 | def check_channel(channel: int) -> None: 35 | if not 1 <= channel <= 8: 36 | raise argparse.ArgumentTypeError(f"There is no channel {channel}") 37 | 38 | check_channel(volt_amp_ch_pair.voltage_ch) 39 | check_channel(volt_amp_ch_pair.ampere_ch) 40 | 41 | if volt_amp_ch_pair.voltage_ch == volt_amp_ch_pair.ampere_ch: 42 | raise argparse.ArgumentTypeError(f"Voltage ({volt_amp_ch_pair.voltage_ch}) " 43 | f"and ampere ({volt_amp_ch_pair.ampere_ch}) channel must be different ") 44 | return volt_amp_ch_pair 45 | 46 | def channel_type(value: str) -> int: 47 | ivalue = int(value) 48 | if 1 <= ivalue <= 2*8: 49 | return ivalue 50 | raise argparse.ArgumentTypeError(f"There is no channel {value}") 51 | 52 | def arg_assert(ok: bool, fail_message: str) -> None: 53 | if not ok: 54 | parser.error(fail_message) 55 | 56 | parser = argparse.ArgumentParser(description="") 57 | parser.add_argument("csv_input", 58 | type=str, default=None, 59 | help="The data file. Can be '-' to take STDIN as data source.") 60 | # TODO: at the moment channel means column not the real channel used 61 | # if e.g. only channel 7 & 8 were recorded, these wer handelt as channel 1&2 in this software 62 | parser.add_argument("voltamp_pairs", nargs="+", 63 | type=va_pair_type, default=None, 64 | help="What channels belong together. Format is: " 65 | "{volt_channel}:{ampere_channel}:{name} e.g. '1:2:L1'") 66 | parser.add_argument("-o", "--output", dest="csv_output", 67 | type=str, default="-", 68 | help="The output file. If it is '-', STDOUT is used. If it is '.', it will use the " 69 | "inputfilename and append 'acsv'. By default it prints to STDOUT.") 70 | parser.add_argument("-w", "--windowsize", dest="window_size", 71 | type=int, default=2048, 72 | help="The size of the window used to analyse the data. One value of " 73 | "each type (e.g. frequency, rms voltage) is computed per window. Default is 2048.") 74 | parser.add_argument("-s", "--stepsize", dest="step_size", 75 | type=int, default=1024, 76 | help="The window is shifted about this amount after each computation round. Default is 1024.") 77 | parser.add_argument("--voltagescale", dest="voltage_scale_factor", 78 | type=float, default=200, 79 | help="Voltage values are scale with this factor before any analysis happens. Default is 200.") 80 | parser.add_argument("--v2afactor", dest="voltage_to_ampere_factor", 81 | # type=float, default=2.857, # computed default value 82 | type=float, default=2.96, # measured default value 83 | help="Ampere values are scale with this factor before any analysis happens. Default is 2.96.") 84 | parser.add_argument("--start", dest="start_sec", 85 | type=float, default=0, 86 | help="Amount of seconds of the data to be skipped at the beginning. Default is 0.") 87 | # parser.add_argument("-s", "--channels", metavar="channel", nargs="+", 88 | # type=channel_type, default=None, 89 | # help="Select channels that are of interest") 90 | 91 | args = parser.parse_args() 92 | 93 | if args.csv_input == "-": 94 | arg_assert(args.csv_output != ".", "If input is read from STDIN, '.' as argument for " 95 | "the output file is not allowed.") 96 | args.csv_output = args.csv_output if args.csv_output != "." else f"{args.csv_input}.acsv" 97 | 98 | csv_input_file = sys.stdin if args.csv_input == "-" else open_csv_file(args.csv_input) 99 | 100 | # read header (all comment lines before the data) 101 | header = [] 102 | channel_names_line = "" 103 | header_data_file_format: bool = "auto" # new versions of csvexport.py produces CSV files 104 | # that start with '# HEADER and then before the actual data come 105 | # a line with '# DATA' comes 106 | if header_data_file_format == "auto": 107 | if args.csv_input == "-": 108 | header_data_file_format = True 109 | else: 110 | header_data_file_format = False 111 | 112 | while True: 113 | line = csv_input_file.readline() 114 | 115 | if header_data_file_format: 116 | if line == "# DATA\n": 117 | break 118 | if not line.startswith("#"): 119 | continue 120 | else: 121 | if not line.startswith("#"): # first data line is ignored 122 | break 123 | if line.startswith("# ch"): 124 | channel_names_line = line 125 | header.append(line) 126 | 127 | device_sampling_rate, measured_sampling_rate, start_time, per_channel_data = parse_csv_lines(header) 128 | 129 | assert len(device_sampling_rate) + len(measured_sampling_rate) > 0, "Found no sample rate in input" 130 | sampling_rate: float = [*device_sampling_rate, *measured_sampling_rate][-1] 131 | assert len(start_time), "There should be exactly one timestamp in the header" 132 | start_time = start_time[0] 133 | channel_count = len(channel_names_line.split(",")) 134 | args.voltamp_pairs = [VoltAmpChPair(vap.voltage_ch-1, vap.ampere_ch-1, vap.name) for vap in args.voltamp_pairs] 135 | 136 | # if args.channels is None: 137 | # args.channels = [*range(0, channel_count-1)] 138 | # else: 139 | # args.channels = [x - 1 for x in args.channels] 140 | 141 | for vap in args.voltamp_pairs: 142 | arg_assert(vap.voltage_ch < channel_count, f"Selected voltage channel {vap.voltage_ch+1} does not exist."); 143 | arg_assert(vap.ampere_ch < channel_count, f"Selected ampere channel {vap.ampere_ch+1} does not exist."); 144 | 145 | csv_output_file = sys.stdout if args.csv_output == "-" else open_csv_file(args.csv_output, mode="wt") 146 | csv_writer = CsvWriter(csv_output_file, delimiter=',') 147 | if csv_output_file != sys.stdout: 148 | print(f"Writing results to '{args.csv_output}'") 149 | 150 | csv_writer.write_comment(f"HEADER") 151 | csv_writer.write_comment(f"source : {args.csv_input}") 152 | csv_writer.write_comment(f"device_sampling_rate : {device_sampling_rate}") 153 | csv_writer.write_comment(f"measured_sampling_rate : {measured_sampling_rate}") 154 | csv_writer.write_comment(f"|->sampling_rate : {sampling_rate} Hz") 155 | csv_writer.write_comment(f"channel count : {channel_count}") 156 | csv_writer.write_comment(f"UNIX time of CSV : {start_time}") 157 | csv_writer.write_comment(f"voltage ampere pairs : {', '.join(f'{name}: {v_ch+1} and {a_ch+1}' for v_ch, a_ch, name in args.voltamp_pairs)}") 158 | csv_writer.write_comment(f"voltage scale SV : {args.voltage_scale_factor}") 159 | csv_writer.write_comment(f"voltage to ampere scale: {args.voltage_to_ampere_factor}") 160 | csv_writer.write_comment(f"DATA") 161 | 162 | values = [] 163 | last_time = None 164 | # work in Watt*sec 165 | PQS_work = {pair_name: [0, 0, 0] for _, _, pair_name in args.voltamp_pairs} 166 | # max_list = [0 for _ in range(0, channel_count)] 167 | for time, value_row in read_value(csv_input_file): 168 | # for i in range(0, channel_count): 169 | # max_list[i] = max(abs(value_row[i]), max_list[i]) 170 | # continue 171 | # skip first args.start_sec seconds of data 172 | if time < start_time + args.start_sec: 173 | continue 174 | 175 | values.append(value_row) 176 | 177 | if len(values) == args.window_size: 178 | mid_time = time - 0.5 * args.window_size * (1.0/sampling_rate) 179 | per_channel_data = list(zip(*values)) 180 | print_window_analysis(csv_writer, 181 | mid_time, 182 | 0 if last_time is None else mid_time - last_time, 183 | per_channel_data, 184 | args.voltamp_pairs, 185 | PQS_work, 186 | sampling_rate, 187 | args.voltage_scale_factor, 188 | args.voltage_to_ampere_factor) 189 | 190 | del values[0:args.step_size] # remove unneeded values 191 | last_time = mid_time 192 | # print(f"max_l3_i: {max_list} V") 193 | return 194 | 195 | 196 | def read_value(csv_file: TextIO) -> Generator[Tuple[float, List[float]], None, None]: 197 | time = None # the time as unix timestamp (sec since 1970 or so) 198 | while True: 199 | line = csv_file.readline() 200 | if line == "": 201 | return 202 | if line.startswith("#"): 203 | match = common.unix_time_regex.search(line) 204 | if match: 205 | time = float(match.group(2)) 206 | # if "UTC" not in line: # older version of csvexport.py uses local time instead of UTC 207 | # time -= 60*60 # older version was only used on on CET so 1h time difference 208 | 209 | elif time is not None: # ignore values that are before first time comment 210 | yield time, [float(x) for x in line.split(",")] 211 | 212 | 213 | def print_window_analysis(csv_writer: CsvWriter, 214 | time: float, 215 | delta: float, 216 | per_channel_data: List[List[float]], 217 | voltamp_pairs: VoltAmpChPair, 218 | PQS_work: Dict[str, List[float]], # PW, QW and SQ per channel in Watt*sec 219 | input_sampling_rate: float, 220 | voltage_scale_factor: float, 221 | voltage_to_ampere_factor: float 222 | ) -> None: 223 | wattsec_to_wh = 1.0 / (60 * 60) 224 | time_str = f"{time:.3f}" 225 | Li_P, Li_Q, Li_S = 0.0, 0.0, 0.0 226 | 227 | # TODO: for test, simple compute average of window per channel 228 | # window_avg_per_channel = [numpy.mean(ch_data) for ch, ch_data in enumerate(per_channel_data)] 229 | # csv_writer.write_row([f"{(time - 1519042692.600763)/(60*60):.4f}"] + [f"{v:.2f}" for v in window_avg_per_channel]) # Wirkleistung 230 | # return 231 | 232 | for voltage_ch, ampere_ch, pair_name in voltamp_pairs: 233 | voltage_data = per_channel_data[voltage_ch] # the directly measured voltage, maybe has to be scaled 234 | ampere_data = per_channel_data[ampere_ch] # the directly measured voltage, has to be converted to ampere 235 | 236 | # scale data 237 | if voltage_scale_factor != 1: 238 | voltage_data = [v * voltage_scale_factor for v in voltage_data] 239 | if voltage_to_ampere_factor != 1: 240 | ampere_data = [v * voltage_to_ampere_factor for v in ampere_data] 241 | 242 | # TODO: for tests only, remove this line afterwards 243 | # ampere_data = [a + 0.03 for a in ampere_data] 244 | # voltage_data = [v + 5 for v in voltage_data] 245 | 246 | voltage_avg_local_min, voltage_avg_local_max = analyse_channel_avg_local_min_max(voltage_data) 247 | voltage_avg_local_min *= voltage_scale_factor 248 | voltage_avg_local_max *= voltage_scale_factor 249 | voltage_avg = numpy.mean(voltage_data) * voltage_scale_factor 250 | 251 | # TODO: for tests only, remove these lines afterwards 252 | # voltage_raw_data = per_channel_data[voltage_ch+7] 253 | # voltage_raw_avg = numpy.mean(voltage_raw_data) 254 | # print(voltage_raw_avg) 255 | 256 | Lx_P, Lx_Q, Lx_S, Lx_phase_angle, Lx_voltage_rms, Lx_ampere_rms =\ 257 | analyse_pair_window(voltage_data, ampere_data) 258 | 259 | Lx_mf_fft_max, Lx_mf_fft_parabolic, Lx_mf_fft_gaussian, Lx_mf_autocorrelate_parabolic, Lx_mf_zerocrossing =\ 260 | analyse_channel_window(voltage_data, input_sampling_rate) 261 | 262 | # TODO: for tests only, remove these lines afterwards 263 | # h = electro.measure_main_frequencies_fft(ampere_data, input_sampling_rate, 3) 264 | # print(h) 265 | 266 | # work in Watt*sec 267 | PQS_work[pair_name][0] += Lx_P * delta 268 | PQS_work[pair_name][1] += Lx_Q * delta 269 | PQS_work[pair_name][2] += Lx_S * delta 270 | 271 | Li_P += Lx_P 272 | Li_Q += Lx_Q 273 | Li_S += Lx_S 274 | 275 | csv_writer.write_row([time_str, f"{pair_name}_PW", f"{PQS_work[pair_name][0]*wattsec_to_wh:.6f}", "Wh"]) 276 | csv_writer.write_row([time_str, f"{pair_name}_QW", f"{PQS_work[pair_name][1]*wattsec_to_wh:.6f}", "Wh"]) 277 | csv_writer.write_row([time_str, f"{pair_name}_SW", f"{PQS_work[pair_name][2]*wattsec_to_wh:.6f}", "Wh"]) 278 | 279 | csv_writer.write_row([time_str, f"{pair_name}_P", f"{Lx_P:.3f}", "W"]) # Wirkleistung 280 | csv_writer.write_row([time_str, f"{pair_name}_Q", f"{Lx_Q:.3f}", "W"]) 281 | csv_writer.write_row([time_str, f"{pair_name}_S", f"{Lx_S:.3f}", "W"]) 282 | csv_writer.write_row([time_str, f"{pair_name}_φ", f"{Lx_phase_angle:.3f}", "°"]) 283 | # _U was _V in an older version 284 | csv_writer.write_row([time_str, f"{pair_name}_U", f"{Lx_voltage_rms:.3f}", "V"]) 285 | csv_writer.write_row([time_str, f"{pair_name}_U_AVGMIN", f"{voltage_avg_local_min:.3f}", "V"]) 286 | csv_writer.write_row([time_str, f"{pair_name}_U_AVGMAX", f"{voltage_avg_local_max:.3f}", "V"]) 287 | csv_writer.write_row([time_str, f"{pair_name}_U_ZOS", f"{0.5 * (voltage_avg_local_min + voltage_avg_local_max):.3f}", "V"]) 288 | csv_writer.write_row([time_str, f"{pair_name}_U_AVG", f"{voltage_avg:.3f}", "V"]) 289 | # _I was _A in an older version 290 | csv_writer.write_row([time_str, f"{pair_name}_I", f"{Lx_ampere_rms:.3f}", "A"]) 291 | 292 | csv_writer.write_row([time_str, f"{pair_name}_F_MAX", f"{Lx_mf_fft_max:.6f}", "Hz"]) 293 | csv_writer.write_row([time_str, f"{pair_name}_F_PAR", f"{Lx_mf_fft_parabolic if Lx_mf_fft_parabolic is not None else -1:.6f}", "Hz"]) 294 | csv_writer.write_row([time_str, f"{pair_name}_F_GAU", f"{Lx_mf_fft_gaussian if Lx_mf_fft_gaussian is not None else -1:.6f}", "Hz"]) 295 | csv_writer.write_row([time_str, f"{pair_name}_F_AUT", f"{Lx_mf_autocorrelate_parabolic:.6f}", "Hz"]) 296 | csv_writer.write_row([time_str, f"{pair_name}_F_ZC", f"{Lx_mf_zerocrossing:.6f}", "Hz"]) 297 | 298 | # TODO: for test only, remove this line afterwards 299 | # sys.exit(0) 300 | 301 | # write sum over all voltamp pairs 302 | csv_writer.write_row([time_str, f"Li_PW", f"{sum(list(zip(*PQS_work.values()))[0]) * wattsec_to_wh:.6f}", "Wh"]) 303 | csv_writer.write_row([time_str, f"Li_QW", f"{sum(list(zip(*PQS_work.values()))[1]) * wattsec_to_wh:.6f}", "Wh"]) 304 | csv_writer.write_row([time_str, f"Li_SW", f"{sum(list(zip(*PQS_work.values()))[2]) * wattsec_to_wh:.6f}", "Wh"]) 305 | 306 | csv_writer.write_row([time_str, f"Li_P", f"{Li_P:.2f}", "W"]) 307 | csv_writer.write_row([time_str, f"Li_Q", f"{Li_Q:.2f}", "W"]) 308 | csv_writer.write_row([time_str, f"Li_S", f"{Li_S:.2f}", "W"]) 309 | 310 | 311 | def analyse_channel_window(channel_values: List[float], input_sampling_rate: float) \ 312 | -> Tuple[float, Optional[float], Optional[float], float, Optional[float]]: 313 | length = len(channel_values) 314 | fourier = numpy.fft.rfft(channel_values * numpy.blackman(length)) 315 | # convert complex -> real 316 | fourier_amplitude = numpy.absolute(fourier) 317 | fourier_phase = numpy.angle(fourier, deg=True) # range: [-180,180] 318 | fourier_frequency = numpy.fft.rfftfreq(n=length, d=1.0 / input_sampling_rate) 319 | fourier_frequency_step_width = fourier_frequency[1] 320 | 321 | # get the highest value (+ index of that) 322 | max_index, max_value = max(enumerate(fourier_amplitude), key=lambda v: v[1]) 323 | 324 | # norm_fac = 1.0 / max_value # max in result will be 1.0 325 | norm_fac = 2.0 / (length/2) # see "y-Axis: The Amplitude of the FFT Signal" in http://www.cbcity.de/die-fft-mit-python-einfach-erklaert 326 | # calculate the frequency that each value in the fourier array belongs to, 327 | # and than builds pairs of frequency and intensity) 328 | fft_amplitude_points = list(zip(fourier_frequency, fourier_amplitude * norm_fac)) 329 | # does the same as the code above, but is 5x slower 330 | # fft_points = [(x / (2 * len(fourier)) * self.__input_sampling_rate, y * norm_fac) 331 | # for x, y in enumerate(fourier)] 332 | 333 | # fft_points[0] is y offset (DC value) 334 | 335 | fft_phase_points = list(zip(fourier_frequency, 0.5 + fourier_phase / 360)) 336 | 337 | auto = numpy.correlate(channel_values, channel_values, mode="full") 338 | # fft_autocorrelation_points = list(zip(fourier_frequency, (auto / max(auto))[round(len(auto) / 2):])) 339 | 340 | # print_column('fft freq steps', f'{fourier_frequency_step_width:.9f} Hz') 341 | 342 | # amplitude_trashold = 0.1 * max_value 343 | # crucial_sins = [(freq, amp, phase) for freq, amp, phase 344 | # in zip(fourier_frequency, fourier_amplitude, fourier_phase) 345 | # if amp >= amplitude_trashold] 346 | # max_sin = max(crucial_sins, key=lambda v: v[1]) 347 | # print(sep="\n", *(f"{freq:7.3f} Hz, {(max(phase,max_sin[2])-min(phase, max_sin[2])):6.2f}°: {amp:.3f}" 348 | # for freq, amp, phase in crucial_sins)) 349 | 350 | main_frequency: float = fft_amplitude_points[max_index][0] 351 | main_frequency_phase: float = fft_phase_points[max_index][0] 352 | 353 | # print_column('main frequency(fft, max)', f'{main_frequency:.4f} Hz + {main_frequency_phase:6.2f}°') 354 | mf_fft_parabolic = mf_fft_gaussian = None 355 | if 0 < max_index < len(fft_amplitude_points): 356 | mf_fft_parabolic = electro.parabolic_interpolation(fourier_amplitude, max_index) * fourier_frequency_step_width 357 | mf_fft_gaussian = electro.gaussian_interpolation(fourier_amplitude, max_index) * fourier_frequency_step_width 358 | 359 | mf_autocorrelate_parabolic = electro.measure_main_frequency_autocorrelate(channel_values, input_sampling_rate) 360 | mf_zerocrossing = electro.measure_main_frequency_zero_crossing(channel_values, input_sampling_rate) 361 | 362 | # print_column('min/rms/max', f'{min(channel_values_part):.4f} / ' 363 | # f'{electro.rms(channel_values_part):.4f} / ' 364 | # f'{max(channel_values_part):.4f}') 365 | 366 | return main_frequency, mf_fft_parabolic, mf_fft_gaussian, mf_autocorrelate_parabolic, mf_zerocrossing 367 | 368 | 369 | def analyse_pair_window(voltage_values: List[float], ampere_values: List[float])\ 370 | -> Tuple[float, float, float, float, float, float]: 371 | 372 | P, Q, S = electro.calc_power(voltage_values, ampere_values) 373 | phase_angle = math.acos(P / S) 374 | voltage_rms = electro.rms(voltage_values) 375 | ampere_rms = electro.rms(ampere_values) 376 | return P, Q, S, phase_angle, voltage_rms, ampere_rms 377 | 378 | 379 | def analyse_channel_avg_local_min_max(channel_values: List[float]) -> Tuple[float, float]: 380 | def neighbor_iterator(values: List[float]) -> Generator[List[float], None, None]: 381 | for i in range(1, len(values)-1): 382 | yield values[i-1:i+2] 383 | 384 | local_min_values = [v for l, v, r in neighbor_iterator(channel_values) if l > v < r] 385 | local_max_values = [v for l, v, r in neighbor_iterator(channel_values) if l < v > r] 386 | return numpy.mean(local_min_values), numpy.mean(local_max_values) 387 | 388 | 389 | if __name__ == '__main__': 390 | main() 391 | -------------------------------------------------------------------------------- /csvexport.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from hantek1008 import Hantek1008, CorrectionDataType, ZeroOffsetShiftCompensationFunctionType 3 | from typing import Union, Optional, List, Dict, Any, IO, TextIO 4 | import typing 5 | import logging as log 6 | import argparse 7 | import time 8 | import datetime 9 | import os 10 | import lzma 11 | import sys 12 | import math 13 | from usb.core import USBError 14 | from time import sleep 15 | from utils.csvwriter import ThreadedCsvWriter, CsvWriter 16 | from enum import Enum 17 | 18 | assert sys.version_info >= (3, 6) 19 | 20 | # SamplingMode = enum.Enum("SamplingMode", ["BURST", "ROLL"]) 21 | 22 | 23 | class ArgparseEnum(Enum): 24 | def __str__(self) -> str: 25 | assert isinstance(self.value, str) 26 | return self.value 27 | 28 | 29 | class RawVoltMode(ArgparseEnum): 30 | VOLT = "volt" 31 | RAW = "raw" 32 | VOLT_AND_RAW = "volt+raw" 33 | 34 | 35 | class SamplingMode(ArgparseEnum): 36 | BURST = "burst" 37 | ROLL = "roll" 38 | 39 | 40 | class TimestampStyle(ArgparseEnum): 41 | OWN_ROW = "own_row" 42 | FIRST_COLUMN = "first_column" 43 | 44 | 45 | def main(csv_file_path: str, 46 | selected_channels: Optional[List[int]]=None, 47 | vertical_scale_factor: Optional[List[float]]=[1.0], 48 | calibrate_output_file_path: Optional[str]=None, 49 | calibrate_channels_at_once: Optional[int]=None, 50 | calibration_file_path: Optional[str]=None, 51 | zero_offset_shift_compensation_channel: Optional[int]=None, 52 | zero_offset_shift_compensation_function_file_path: Optional[str]=None, 53 | zero_offset_shift_compensation_function_time_offset_sec: int=0, 54 | raw_or_volt: RawVoltMode=RawVoltMode.VOLT, 55 | sampling_mode: SamplingMode=SamplingMode.ROLL, 56 | sampling_rate: float=440, 57 | ns_per_div: int=500_000, 58 | timestamp_style: TimestampStyle=TimestampStyle.OWN_ROW, 59 | do_sampling_rate_measure: bool=True) -> None: 60 | 61 | if selected_channels is None or len(selected_channels) == 0: 62 | selected_channels = list(range(1, 9)) 63 | 64 | assert len(set(selected_channels)) == len(selected_channels) 65 | assert all(1 <= c <= 8 for c in selected_channels) 66 | selected_channels = [i-1 for i in selected_channels] 67 | 68 | assert zero_offset_shift_compensation_channel is None or zero_offset_shift_compensation_function_file_path is None 69 | 70 | assert zero_offset_shift_compensation_channel is None or 1 <= zero_offset_shift_compensation_channel <= 8 71 | if zero_offset_shift_compensation_channel is not None: 72 | zero_offset_shift_compensation_channel -= 1 73 | 74 | assert zero_offset_shift_compensation_function_time_offset_sec >= 0 75 | 76 | assert vertical_scale_factor is None or isinstance(vertical_scale_factor, List) 77 | if vertical_scale_factor is None: 78 | vertical_scale_factor = [1.0] * 8 79 | elif len(vertical_scale_factor) == 1: 80 | vertical_scale_factor = [1.0 if i not in selected_channels 81 | else vertical_scale_factor[0] 82 | for i in range(8)] 83 | else: 84 | assert len(vertical_scale_factor) == len(selected_channels) 85 | # the vscale value of a channel is the value in vertical_scale_factor 86 | # on the same index as the channel in selected channel 87 | # or 1.0 if the channel is not in selected_channels 88 | vertical_scale_factor = [1.0 if i not in selected_channels 89 | else vertical_scale_factor[selected_channels.index(i)] 90 | for i in range(8)] 91 | 92 | correction_data: CorrectionDataType = [{} for _ in range(8)] # list of dicts of dicts 93 | # use case: correction_data[channel_id][vscale][units] = correction_factor 94 | 95 | zero_offset_shift_compensation_function = None 96 | if zero_offset_shift_compensation_function_file_path: 97 | globals_dict: Dict[str, Any] = {} 98 | with check_and_open_file(zero_offset_shift_compensation_function_file_path) as f: 99 | exec(f.read(), globals_dict) 100 | zero_offset_shift_compensation_function = globals_dict["calc_zos"] 101 | assert callable(zero_offset_shift_compensation_function) 102 | 103 | if calibration_file_path: 104 | with check_and_open_file(calibration_file_path) as f: 105 | import json 106 | calibration_data = json.load(f) 107 | 108 | log.info(f"Using calibration data from file '{calibration_file_path}' to correct measured values") 109 | 110 | for channel_id, channel_cdata in sorted(calibration_data.items()): 111 | channel_id = int(channel_id) 112 | if len(channel_cdata) == 0: 113 | continue 114 | log.info(f"Channel {channel_id+1}:") 115 | for test in channel_cdata: 116 | vscale = test["vscale"] 117 | test_voltage = test["test_voltage"] 118 | units = test["measured_value"] - test["zero_offset"] 119 | correction_factor = test_voltage / (units * 0.01 * vscale) 120 | 121 | if test_voltage == 0: 122 | continue 123 | assert 0.5 < correction_factor < 2.0, "Correction factor seems to be false" 124 | 125 | #log.info(f" {test} -> {correction_factor}") 126 | log.info(f"{test_voltage:>6}V -> {correction_factor:0.5f}") 127 | 128 | if vscale not in correction_data[channel_id]: 129 | correction_data[channel_id][vscale] = {} 130 | 131 | correction_data[channel_id][vscale][units] = correction_factor 132 | 133 | # log.info("\n".join(str(x) for x in correction_data)) 134 | channels_without_cd = [i + 1 for i, x in enumerate(correction_data) if len(x) == 0] 135 | if len(channels_without_cd) > 0: 136 | log.warning(f"There is no calibration data for channel(s): {channels_without_cd}") 137 | 138 | device = connect(ns_per_div, vertical_scale_factor, selected_channels, correction_data, zero_offset_shift_compensation_channel, 139 | zero_offset_shift_compensation_function, zero_offset_shift_compensation_function_time_offset_sec) 140 | 141 | if calibrate_output_file_path: 142 | assert calibrate_channels_at_once is not None 143 | calibration_routine(device, calibrate_output_file_path, calibrate_channels_at_once) 144 | device.close() 145 | sys.exit() 146 | 147 | measured_sampling_rate = None 148 | if do_sampling_rate_measure: 149 | measurment_duration = 10 150 | log.info(f"Measure sample rate of device (takes about {measurment_duration} sec) ...") 151 | measured_sampling_rate = measure_sampling_rate(device, sampling_rate, measurment_duration) 152 | log.info(f"-> {measured_sampling_rate:.4f} Hz") 153 | # TODO Remove 154 | # sys.exit() 155 | 156 | csv_file_path_zero = csv_file_path 157 | 158 | # data collection is in loop because in case of an error it restarts the collection 159 | for i in range(1, 100): 160 | try: 161 | sample(device, raw_or_volt, selected_channels, sampling_mode, sampling_rate, vertical_scale_factor, 162 | csv_file_path, timestamp_style, measured_sampling_rate) 163 | # no error? -> finished by user interaction 164 | break 165 | except USBError as usb_error: 166 | # usb error bug occurred? try to close the device or reset it, sleep a sec and restart 167 | log.error(str(usb_error)) 168 | try: 169 | device.close() 170 | except: 171 | try: 172 | sleep(0.5) 173 | device._dev.reset() 174 | except: 175 | pass 176 | sleep(1.0) 177 | device = connect(ns_per_div, vertical_scale_factor, selected_channels, correction_data, 178 | zero_offset_shift_compensation_channel, 179 | zero_offset_shift_compensation_function, 180 | zero_offset_shift_compensation_function_time_offset_sec) 181 | if csv_file_path_zero != '-': 182 | csv_file_path = f"{csv_file_path_zero}.{i:02d}" 183 | 184 | log.info("Exporting data finished") 185 | device.close() 186 | 187 | 188 | def connect(ns_per_div: int, 189 | vertical_scale_factor: Union[float, List[float]], 190 | selected_channels: List[int], 191 | correction_data: Optional[CorrectionDataType] = None, 192 | zero_offset_shift_compensation_channel: Optional[int] = None, 193 | zero_offset_shift_compensation_function: Optional[ZeroOffsetShiftCompensationFunctionType] = None, 194 | zero_offset_shift_compensation_function_time_offset_sec: int = 0) -> Hantek1008: 195 | device = Hantek1008(ns_per_div=ns_per_div, 196 | vertical_scale_factor=vertical_scale_factor, 197 | active_channels=selected_channels, 198 | correction_data=correction_data, 199 | zero_offset_shift_compensation_channel=zero_offset_shift_compensation_channel, 200 | zero_offset_shift_compensation_function=zero_offset_shift_compensation_function, 201 | zero_offset_shift_compensation_function_time_offset_sec 202 | =zero_offset_shift_compensation_function_time_offset_sec) 203 | 204 | try: 205 | log.info("Connecting...") 206 | try: 207 | device.connect() 208 | except RuntimeError as e: 209 | log.error(str(e)) 210 | sys.exit(1) 211 | log.info("Connection established") 212 | 213 | log.info("Initialising...") 214 | try: 215 | device.init() 216 | except RuntimeError as e: 217 | log.error(str(e)) 218 | sys.exit(1) 219 | log.info("Initialisation completed") 220 | except KeyboardInterrupt: 221 | device.close() 222 | sys.exit(0) 223 | 224 | return device 225 | 226 | 227 | def sample(device: Hantek1008, 228 | raw_or_volt: RawVoltMode, 229 | selected_channels: List[int], 230 | sampling_mode: SamplingMode, 231 | sampling_rate: float, 232 | vertical_scale_factor: List[float], 233 | csv_file_path: str, 234 | timestamp_style: TimestampStyle, 235 | measured_sampling_rate: Optional[float] = None 236 | ) -> None: 237 | log.info(f"Processing data of channel{'' if len(selected_channels) == 1 else 's'}:" 238 | f" {' '.join([str(i+1) for i in selected_channels])}") 239 | 240 | computed_actual_sampling_rate = Hantek1008.actual_sampling_rate_factor(len(selected_channels)) * sampling_rate 241 | if len(selected_channels) != Hantek1008.channel_count(): 242 | log.warning(f"When not using all 8 channels, the actual sampling rate ({computed_actual_sampling_rate:.2f}) is " 243 | f"higher than the given sampling_rate ({sampling_rate})! " 244 | f"Best is to use the --measuresamplingrate flag.") 245 | 246 | if raw_or_volt == RawVoltMode.VOLT_AND_RAW: # add the coresponding raw values to the selected channel list 247 | selected_channels += [sc + Hantek1008.channel_count() for sc in selected_channels] 248 | 249 | try: 250 | # csv_file: IO[str] = None 251 | # output_csv_filename = "channel_data.csv" 252 | if csv_file_path == '-': 253 | log.info("Exporting data to stdout...") 254 | csv_file: IO[str] = sys.stdout 255 | elif csv_file_path.endswith(".xz"): 256 | log.info(f"Exporting data lzma-compressed to file '{csv_file_path}'...") 257 | csv_file = lzma.open(csv_file_path, 'at', newline='') 258 | else: 259 | log.info(f"Exporting data to file '{csv_file_path}'...") 260 | csv_file = open(csv_file_path, 'at', newline='') 261 | 262 | csv_writer: CsvWriter = ThreadedCsvWriter(csv_file, delimiter=',') 263 | 264 | csv_writer.write_comment("HEADER") 265 | 266 | now = datetime.datetime.now() 267 | # timestamps are by nature UTC 268 | csv_writer.write_comment(f"UNIX-Time: {now.timestamp()}") 269 | csv_writer.write_comment(f"UNIX-Time: {now.astimezone(datetime.timezone.utc).isoformat()} UTC") 270 | 271 | # channel >= 8 are the raw values of the corresponding channels < 8 272 | channel_titles = [f'ch_{i+1 if i < 8 else (str(i+1-8)+"_raw")}' for i in selected_channels] 273 | if timestamp_style == "first_column": 274 | channel_titles = ["time"] + channel_titles 275 | csv_writer.write_comment(f"{', '.join(channel_titles)}") 276 | 277 | csv_writer.write_comment(f"sampling mode: {str(sampling_mode)}") 278 | 279 | csv_writer.write_comment(f"intended samplingrate: {sampling_rate} Hz") 280 | csv_writer.write_comment(f"samplingrate: {computed_actual_sampling_rate} Hz") 281 | if measured_sampling_rate: 282 | csv_writer.write_comment(f"measured samplingrate: {measured_sampling_rate} Hz") 283 | 284 | csv_writer.write_comment(f"vscale: {', '.join(str(f) for f in vertical_scale_factor)}") 285 | csv_writer.write_comment("# zero offset data:") 286 | zero_offsets = device.get_zero_offsets() 287 | assert zero_offsets is not None 288 | for vscale, zero_offset in sorted(zero_offsets.items()): 289 | csv_writer.write_comment(f"zero_offset [{vscale:<4}]: {' '.join([str(round(v, 1)) for v in zero_offset])}") 290 | 291 | csv_writer.write_comment(f"zosc-method: {device.get_used_zero_offsets_shift_compensation_method()}") 292 | 293 | csv_writer.write_comment(f"DATA") 294 | 295 | # TODO: make this configurable 296 | milli_volt_int_representation = False 297 | 298 | def write_per_channel_data(per_channel_data: Dict[int, Union[List[int], List[float]]], 299 | time_of_first_value: Optional[float], 300 | time_of_last_value: float) \ 301 | -> None: 302 | # sort all channels the same way as in selected_channels 303 | per_channel_data_list = [per_channel_data[ch] for ch in selected_channels] 304 | 305 | if milli_volt_int_representation: 306 | per_channel_data_list = [[int(round(value*1000)) for value in single_channel] 307 | for single_channel in per_channel_data_list] 308 | 309 | if timestamp_style == "first_column": 310 | assert time_of_first_value is not None 311 | values_per_channel_count = len(per_channel_data_list[0]) 312 | deltatime_per_value = (time_of_last_value - time_of_first_value) / values_per_channel_count 313 | timestamps_interpolated = [time_of_first_value + i * deltatime_per_value 314 | for i in range(values_per_channel_count)] 315 | csv_writer.write_rows(zip(timestamps_interpolated, *per_channel_data_list)) 316 | else: # timestamp_style == "own_row": 317 | csv_writer.write_rows(zip(*per_channel_data_list)) 318 | # timestamps are by nature UTC 319 | csv_writer.write_comment(f"UNIX-Time: {time_of_last_value}") 320 | 321 | if sampling_mode == SamplingMode.ROLL: 322 | last_timestamp = datetime.datetime.now().timestamp() 323 | for per_channel_data in device.request_samples_roll_mode(mode=str(raw_or_volt), sampling_rate=sampling_rate): 324 | now_timestamp = datetime.datetime.now().timestamp() 325 | write_per_channel_data(per_channel_data, last_timestamp, now_timestamp) 326 | last_timestamp = now_timestamp 327 | else: # burst mode 328 | # TODO currently not supported 329 | # TODO missing features: 330 | # * timestamp_style 331 | assert timestamp_style == TimestampStyle.OWN_ROW 332 | while True: 333 | per_channel_data = device.request_samples_burst_mode() 334 | now_timestamp = datetime.datetime.now().timestamp() 335 | write_per_channel_data(per_channel_data, None, now_timestamp) 336 | 337 | except KeyboardInterrupt: 338 | log.info("Sample collection was stopped by user") 339 | pass 340 | 341 | if csv_writer: 342 | csv_writer.close() 343 | 344 | 345 | def measure_sampling_rate(device: Hantek1008, used_sampling_rate: float, measurment_duration: float) -> float: 346 | required_samples = max(4, int(math.ceil(measurment_duration * used_sampling_rate))) 347 | counter = -1 348 | start_time: float = 0 349 | for per_channel_data in device.request_samples_roll_mode(sampling_rate=used_sampling_rate): 350 | if counter == -1: # skip first samples to ignore the duration of initialisation 351 | start_time = time.perf_counter() 352 | counter = 0 353 | counter += len(per_channel_data[0]) 354 | if counter >= required_samples: 355 | break 356 | 357 | duration = time.perf_counter() - start_time 358 | return counter/duration 359 | 360 | 361 | def calibration_routine(device: Hantek1008, calibrate_file_path: str, channels_at_once: int) -> None: 362 | assert channels_at_once in [1, 2, 4, 8] 363 | 364 | print("This interactive routine will generate a calibration that can later be used " 365 | "to get more precise results. It works by connecting different well known " 366 | "voltages one after another to a channel. Once all calibration voltages are " 367 | "measured, the same is done for every other channel.") 368 | 369 | import json 370 | required_calibration_samples_nun = 512 371 | calibration_data: Dict[int, List[Dict[str, Any]]] = {} # dictionary of lists 372 | device.pause() 373 | 374 | test_voltages = None 375 | while test_voltages is None: 376 | try: 377 | in_str = input("Calibration voltages (x, y, z, ...): ") 378 | test_voltages = [float(v) for v in in_str.split(',')] 379 | if len(test_voltages) < 1: 380 | print("Input must contain at least one voltage") 381 | except ValueError: 382 | print("Input must be comma separated floats") 383 | 384 | print(f"Calibration voltages are: {' '.join([ f'{v}V' for v in test_voltages])}") 385 | 386 | for channel_id in range(8): 387 | calibration_data[channel_id] = [] 388 | 389 | for channel_id in range(0, 8, channels_at_once): 390 | 391 | for test_voltage in test_voltages: 392 | cmd = input(f"Do {test_voltage}V measurement on channel {channel_id+1}" 393 | f"{(' to ' + str(channel_id+channels_at_once)) if channels_at_once>1 else ''} (Enter)," 394 | f" skip voltage (s), skip channel (ss) or quit (q): ") 395 | if cmd == 'q': 396 | return 397 | elif cmd == 'ss': 398 | break 399 | elif cmd == 's': 400 | continue 401 | 402 | device.cancel_pause() 403 | 404 | print(f"Measure {required_calibration_samples_nun} values for {test_voltage}V...") 405 | data = [] 406 | for _, row in zip( 407 | range(required_calibration_samples_nun), 408 | device.request_samples_roll_mode_single_row(mode="raw")): 409 | data.append(row) 410 | pass 411 | 412 | device.pause() 413 | 414 | channel_data = list(zip(*data)) 415 | 416 | for calibrated_channel_id in range(channel_id, channel_id+channels_at_once): 417 | cd = channel_data[calibrated_channel_id] 418 | avg = sum(cd) / len(cd) 419 | 420 | calibration_data[calibrated_channel_id].append({ 421 | "test_voltage": test_voltage, 422 | "measured_value": round(avg, 2), 423 | "vscale": device.get_vscales()[calibrated_channel_id], 424 | "zero_offset": round(device.get_zero_offset(channel_id=calibrated_channel_id), 2) 425 | }) 426 | 427 | with open(calibrate_file_path, 'w') as calibration_file: 428 | calibration_file.write(json.dumps(calibration_data)) 429 | 430 | 431 | def check_and_open_file(file_path: str) -> TextIO: 432 | if not os.path.exists(file_path): 433 | log.error(f"There is no file '{file_path}'.") 434 | sys.exit(1) 435 | if os.path.isdir(file_path): 436 | log.error(f"'{file_path}' is a directory.") 437 | sys.exit(1) 438 | return open(file_path) 439 | 440 | 441 | if __name__ == "__main__": 442 | 443 | description = f"""\ 444 | Collect data from device 'Hantek 1008'. Usage examples: 445 | * Save data sampled with 22 Hz in file 'my_data.csv': 446 | {sys.argv[0]} my_data.csv --channels 1 2 --samplingrate 22 447 | * Create and fill calibration file 'my_cal.json': 448 | {sys.argv[0]} --calibrate my_cal.cd.json 1 449 | """ 450 | 451 | def channel_type(value: str) -> int: 452 | ivalue = int(value) 453 | if 1 <= ivalue <= 8: 454 | return ivalue 455 | raise argparse.ArgumentTypeError(f"There is no channel {value}") 456 | 457 | str_to_log_level = {log.getLevelName(ll).lower(): ll for ll in [log.DEBUG, log.INFO, log.WARN]} 458 | 459 | parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, 460 | description=description) 461 | command_group = parser.add_mutually_exclusive_group(required=True) 462 | command_group.add_argument(metavar='csv_path', dest='csv_path', nargs='?', 463 | type=str, default=None, 464 | help='Exports measured data to the given file in CSV format.' 465 | " If the filename ends with '.xz' the content is compressed using lzma/xz." 466 | " This reduces the file size to ~ 1/12 compared to the uncompressed format." 467 | " Those files can be decompressed using 'xz -dk '.") 468 | command_group.add_argument('--calibrate', metavar=('calibrationfile_path', 'channels_at_once'), nargs=2, 469 | type=str, default=None, 470 | help='If set, calibrate the device by measuring given voltages and write' 471 | ' calibration values to given file.' 472 | ' Multiple channels (1, 2, 4 or all 8) can be calibrated at the same time' 473 | ' if supplied with the same voltage. Ignores all other arguments.') 474 | parser.add_argument('-s', '--channels', metavar='channel', nargs='+', 475 | type=channel_type, default=list(range(1, 9)), 476 | help="Selects channels of interest.") 477 | parser.add_argument('-l', '--loglevel', dest='log_level', nargs='?', 478 | type=str, default="info", choices=str_to_log_level.keys(), 479 | help='Sets the log level for debugging.') 480 | parser.add_argument('-v', '--vscale', metavar='scale', nargs="+", 481 | type=float, default=[1.0], choices=Hantek1008.valid_vscale_factors(), 482 | help='Sets the pre scale in the hardware, must be 1, 0.125, or 0.02. If a single value is ' 483 | 'given, all selected channels will use that vscale, otherwise there must be one value ' 484 | 'per selected channel.') 485 | parser.add_argument('-c', '--calibrationfile', dest="calibration_file_path", metavar='calibrationfile_path', 486 | type=str, default=None, 487 | help="Use the content of the given calibration file to correct the measured samples.") 488 | parser.add_argument('-r', '--raw', dest="raw_or_volt", 489 | type=str, default=RawVoltMode.VOLT, const=RawVoltMode.RAW, nargs='?', choices=list(RawVoltMode), 490 | help="Specifies whether the sample values returned from the device should be transformed " 491 | "to volts (using calibration data if specified) or not. If not set, the default " 492 | "value is 'volt'. If the flag is set without a parameter, 'raw' is used.") 493 | parser.add_argument('-z', '--zoscompensation', dest="zos_compensation", metavar='x', 494 | type=str, default=None, nargs='*', 495 | help= 496 | """Compensates the zero offset shift that occurs over longer timescales. 497 | There are two possible ways of compensating that: 498 | (A) Computing the shift out of an unused channel: Needs at least one unused channel, make sure 499 | that no external voltage is applied to the given channel. 500 | (B) Computing the shift with the help of a given function. Such a function computes a 501 | correction-factor based on the time passed since start. 502 | Defaults to no compensation. If used without an argument, method A is used on channel 8. 503 | If an integer argument is given, method A is used on that channel. Otherwise, method B is used, 504 | which expects a path to a python file with containing a function 505 | (calc_zos(ch: int, vscale: float, dtime: float)->float) in it 506 | and as a second argument a time offset (how long the device is already running in sec). 507 | """) 508 | parser.add_argument('-b', '--samplingmode', dest='sampling_mode', 509 | type=SamplingMode, default=SamplingMode.ROLL, choices=list(SamplingMode), 510 | help="TODO") 511 | parser.add_argument('-f', '--samplingrate', dest='sampling_rate', 512 | type=float, default=440, choices=Hantek1008.valid_roll_mode_sampling_rates(), 513 | help='Sets the sampling rate (in Hz) the device should use in roll mode (default:440). ' 514 | 'If not all channels are used the actual sampling rate is higher. The factors are: ' 515 | f'{[Hantek1008.actual_sampling_rate_factor(ch) for ch in range(1, 9)]}. ' 516 | 'E.g. if only two channels are used the actual sampling rate is 3.03 higher ' 517 | 'than the given value. A free channel that is used for the zos-compensation will reduce ' 518 | 'the actual sampling the same way as if the channel is normally used.') 519 | parser.add_argument('-n', '--nsperdiv', dest='ns_per_div', 520 | type=float, default=500_000, choices=Hantek1008.valid_burst_mode_ns_per_divs(), 521 | help='Sets the horizontal resolution (in nanoseconds per div) the device should use in ' 522 | 'burst mode (default:500_000). A single div contains around 25 samples.' 523 | 'If not all channels are used, the actual resolution increases by an unknown factor.') 524 | parser.add_argument('-m', '--measuresamplingrate', dest='do_sampling_rate_measure', action="store_const", 525 | default=False, const=True, 526 | help='Measures the exact sampling rate the device achieves by using the computer internal ' 527 | 'clock. Increases startup duration by ~10 sec.') 528 | parser.add_argument('-t', '--timestampstyle', dest="timestamp_style", 529 | type=TimestampStyle, default=TimestampStyle.OWN_ROW, nargs='?', choices=list(TimestampStyle), 530 | help="Specifies the style of the timestamps included in the CSV output. There" 531 | " are two options: When the 'own_row' style is used, every time the device sends a bunch" 532 | " of measured samples, these are written to the CSV output followed by one row with the" 533 | " timestamp." 534 | " Use the 'first_column' option to let the first column of each line have an interpolated" 535 | " timestamp. Default is 'own_row'.") 536 | 537 | args = parser.parse_args() 538 | 539 | args.log_level = str_to_log_level[args.log_level] 540 | 541 | def arg_assert(ok: bool, fail_message: str) -> None: 542 | if not ok: 543 | parser.error(fail_message) 544 | 545 | 546 | if args.calibrate is not None: 547 | calibrate_channels_at_once = args.calibrate[1] 548 | arg_assert(calibrate_channels_at_once.isdigit() and int(calibrate_channels_at_once) in [1, 2, 4, 8], 549 | "The second argument must be 1, 2, 4 or 8.") 550 | 551 | arg_assert(len(args.vscale) == 1 or len(args.vscale) == len(args.channels), 552 | "There must be one vscale factor or as many as selected channels") 553 | arg_assert(len(set(args.channels)) == len(args.channels), 554 | "Selected channels list is not a set (multiple occurrences of the same channel id") 555 | # arg_assert(args.calibration_file_path is None or not args.raw_or_volt.contains("volt"), 556 | # "--calibrationfile can not be used together with the '--raw volt' flag") 557 | # arg_assert(args.zos_compensation is None or not args.raw_or_volt.contains("volt"), 558 | # "--zoscompensation can not be used together with the '--raw volt' flag") 559 | 560 | if args.zos_compensation is not None: 561 | arg_assert(len(args.zos_compensation) <= 2, "'--zoscompensation' only awaits 0, 1 or 2 parameters") 562 | if len(args.zos_compensation) == 0: 563 | # defaults to channel 8 564 | args.zos_compensation = [8] 565 | if len(args.zos_compensation) == 1: # if compensation via unused channel is used 566 | args.zos_compensation[0] = channel_type(args.zos_compensation[0]) 567 | arg_assert(len(args.channels) < 8, 568 | "Zero-offset-shift-compensation is only possible if there is at least one unused channel") 569 | arg_assert(args.zos_compensation[0] not in args.channels, 570 | f"The channel {args.zos_compensation[0]} is used for Zero-offset-shift-compensation," 571 | f" but it is also a selected channel") 572 | if len(args.zos_compensation) == 2: # if compensation via function is used 573 | arg_assert(args.zos_compensation[1].isdigit(), "The second argument must be an int") 574 | args.zos_compensation[1] = int(args.zos_compensation[1]) 575 | 576 | arg_assert(not (args.do_sampling_rate_measure and args.sampling_mode == SamplingMode.BURST), 577 | "Measuring the sample rate only works in roll mode") 578 | 579 | log.basicConfig(level=args.log_level, format='%(levelname)-7s: %(message)s') 580 | 581 | main(selected_channels=args.channels, 582 | vertical_scale_factor=args.vscale, 583 | csv_file_path=args.csv_path, 584 | calibrate_output_file_path=args.calibrate[0] if args.calibrate else None, 585 | calibrate_channels_at_once=int(args.calibrate[1]) if args.calibrate else None, 586 | calibration_file_path=args.calibration_file_path, 587 | raw_or_volt=args.raw_or_volt, 588 | zero_offset_shift_compensation_channel= 589 | args.zos_compensation[0] 590 | if args.zos_compensation is not None and len(args.zos_compensation) == 1 591 | else None, 592 | zero_offset_shift_compensation_function_file_path= 593 | args.zos_compensation[0] 594 | if args.zos_compensation is not None and len(args.zos_compensation) == 2 595 | else None, 596 | zero_offset_shift_compensation_function_time_offset_sec= 597 | args.zos_compensation[1] 598 | if args.zos_compensation is not None and len(args.zos_compensation) == 2 599 | else 0, 600 | sampling_mode=args.sampling_mode, 601 | sampling_rate=args.sampling_rate, 602 | ns_per_div=args.ns_per_div, 603 | timestamp_style=args.timestamp_style, 604 | do_sampling_rate_measure=args.do_sampling_rate_measure) 605 | -------------------------------------------------------------------------------- /hantek1008.py: -------------------------------------------------------------------------------- 1 | import usb.core 2 | import usb.util 3 | import usb.backend 4 | import time 5 | from time import sleep 6 | from typing import Union, Optional, List, Dict, Tuple, Callable, Generator 7 | import logging as log 8 | import math 9 | from threading import Thread 10 | import copy 11 | import sys 12 | 13 | # marking a child class method with overrides makes sure the method overrides a parent class method. 14 | # this check is only needed during development so its no problem if this package is not installed. 15 | # to avoid errors, we need to define a dummy decorator. 16 | try: 17 | from overrides import overrides 18 | except ImportError: 19 | # create dummy decorator that accepts any arguments 20 | def overrides(**kwargs): 21 | def overrides_helper(method: Callable) -> Callable: 22 | return method 23 | return overrides_helper 24 | 25 | assert sys.version_info >= (3, 6) 26 | 27 | """ 28 | To get access to the USB Device: 29 | 30 | 1. create file "/etc/udev/rules.d/99-hantek1008.rules" with content: 31 | ACTION=="add", SUBSYSTEM=="usb", ATTRS{idVendor}=="0783", ATTR{idProduct}=="5725", MODE="0666" 32 | 2. sudo udevadm control -R 33 | 3. Replug the device 34 | """ 35 | 36 | 37 | class Hantek1008Raw: 38 | """ 39 | This class communicates to a Hantek1008 device via USB. 40 | It supports configuring the device (set vertical scale, sampling frequency, waveform generator,..) 41 | and measuring samples with it. Either in continuous (rolling) mode or in windows (normal/burst) mode. 42 | """ 43 | # channel_id/channel_index are zero based 44 | # channel names are one based 45 | 46 | __MAX_PACKAGE_SIZE: int = 64 47 | __VSCALE_FACTORS: List[float] = [0.02, 0.125, 1.0] 48 | __roll_mode_sampling_rate_to_id_dic: Dict[float, int] = \ 49 | {440: 0x18, 220: 0x19, 88: 0x1a, 44: 0x1b, 50 | 22: 0x1c, 11: 0x1d, 5: 0x1e, 2: 0x1f, 51 | 1: 0x20, 0.5: 0x21, 0.25: 0x22, 0.125: 0x23, 52 | 1.0/16: 0x24} 53 | # ids for all valid nanoseconds per div. These ns_per_divs have following pattern: (1|2|3){0} 54 | # eg. 10, 2000 or 5. Maximum is 200_000_000 55 | # a div contains around 25 samples 56 | __burst_mode_ns_per_div_to_id_dic = {({0: 1, 1: 2, 2: 5}[id % 3] * 10 ** (id // 3)): id for id in range(26)} 57 | 58 | def __init__(self, ns_per_div: int = 500_000, 59 | vertical_scale_factor: Union[float, List[float]] = 1.0, 60 | active_channels: Optional[List[int]] = None, 61 | trigger_channel: int = 0, 62 | trigger_slope: str = "rising", 63 | trigger_level: int = 2048 64 | ) -> None: 65 | """ 66 | :param ns_per_div: 67 | :param vertical_scale_factor: must be an array of length 8 with a float scale value for each channel 68 | or a single float scale factor applied to all channels. The float must be either 1.0, 0.2 or 0.02. 69 | :param active_channels: a list of channel that will be used 70 | """ 71 | 72 | assert isinstance(vertical_scale_factor, float) \ 73 | or len(vertical_scale_factor) == Hantek1008Raw.channel_count() 74 | 75 | self.__ns_per_div: int = ns_per_div # one value for all channels 76 | 77 | self.__active_channels: List[int] = copy.deepcopy(active_channels) if active_channels is not None\ 78 | else Hantek1008Raw.valid_channel_ids() 79 | self.__active_channels = sorted(self.__active_channels) # some methods depend of ascending order of this 80 | 81 | # one vertical scale factor (float) per channel 82 | self.__vertical_scale_factors: List[float] = [vertical_scale_factor] * Hantek1008Raw.channel_count() \ 83 | if isinstance(vertical_scale_factor, float) \ 84 | else copy.deepcopy(vertical_scale_factor) # scale factor per channel 85 | 86 | self.__trigger_channel: int = trigger_channel 87 | self.__trigger_slope: str = trigger_slope 88 | self.__trigger_level: int = trigger_level 89 | 90 | # dict of list of floats, outer dict is of size 3 and contains values 91 | # for every vertical scale factor, inner list contains an zero offset per channel 92 | self._zero_offsets: Optional[Dict[float, List[float]]] = None 93 | 94 | self.__out: usb.core.Endpoint = None # the usb out endpoint 95 | self.__in: usb.core.Endpoint = None # the usb in endpoint 96 | self._dev: usb.core.Device = None # the usb device 97 | self._cfg: usb.core.Configuration = None # the used usb configuration 98 | self._intf: usb.core.Interface = None # the used usb interface 99 | 100 | self.__pause_thread: Optional[Thread] = None 101 | self.__cancel_pause_thread: bool = False 102 | 103 | def connect(self) -> None: 104 | """Find a plugged in hantek 1008c device and set up the connection to it""" 105 | 106 | self._dev = usb.core.find(idVendor=0x0783, idProduct=0x5725) 107 | 108 | # was it found? 109 | if self._dev is None: 110 | raise RuntimeError('No Hantek 1008 device found') 111 | 112 | # set the active configuration. With no arguments, the first 113 | # configuration will be the active one 114 | self._dev.set_configuration() 115 | 116 | self._cfg = self._dev.get_active_configuration() 117 | self._intf = self._cfg[(0, 0)] 118 | 119 | # get an output endpoint instance 120 | self.__out = usb.util.find_descriptor( 121 | self._intf, 122 | # match the first OUT endpoint 123 | custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT) 124 | 125 | # get an input endpoint instance 126 | self.__in = usb.util.find_descriptor( 127 | self._intf, 128 | # match the first IN endpoint 129 | custom_match=lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN) 130 | 131 | assert self.__out is not None 132 | assert self.__in is not None 133 | 134 | def __write_and_receive(self, message: bytes, response_length: int, 135 | sec_till_response_request: float = 0.002, sec_till_start: float = 0.002) -> bytes: 136 | """write to and read from the device""" 137 | start_time = time.time() 138 | 139 | assert isinstance(message, bytes) 140 | log.debug(f">[{len(message):2}] {bytes.hex(message)}") 141 | 142 | sleep(sec_till_start) 143 | 144 | self.__out.write(message) 145 | 146 | sleep(sec_till_response_request) 147 | 148 | response = bytes(self.__in.read(response_length)) 149 | 150 | log.debug(f"<[{len(response):2}] {bytes.hex(response)}") 151 | log.debug(f"delta: {time.time()-start_time:02.4f} sec") 152 | assert len(response) == response_length 153 | 154 | return response 155 | 156 | def __send_cmd(self, cmd_id: int, parameter: Union[bytes, List[int], str] = b'', 157 | response_length: int = 0, echo_expected: bool = True, 158 | sec_till_response_request: float = 0, sec_till_start: float = 0.002) -> bytes: 159 | """sends a command to the device and checks if the device echos the command id""" 160 | if isinstance(parameter, str): 161 | parameter = bytes.fromhex(parameter) 162 | elif isinstance(parameter, list): 163 | parameter = bytes(parameter) 164 | assert isinstance(parameter, bytes) 165 | assert 0 <= cmd_id <= 255 166 | 167 | msg = bytes([cmd_id]) + parameter 168 | response = self.__write_and_receive(msg, response_length + (1 if echo_expected else 0), 169 | sec_till_response_request=sec_till_response_request, 170 | sec_till_start=sec_till_start) 171 | if echo_expected: 172 | assert response[0] == cmd_id 173 | return response[1:] 174 | else: 175 | return response 176 | 177 | def __send_c6_a6_command(self, parameter: int) -> bytes: 178 | """send the c602 or c603 command, then parse the response as sample_length. then CEIL(sample_length/64) 179 | a602 or a603 requests follow. The responses are concatenated and finally returned trimmed to fit the sample_length. 180 | """ 181 | assert parameter in [2, 3] 182 | response = self.__send_cmd(0xc6, parameter=[parameter], response_length=2, echo_expected=False) 183 | sample_length = int.from_bytes(response, byteorder="big", signed=False) 184 | sample_packages_count = int(math.ceil(sample_length / self.__MAX_PACKAGE_SIZE)) 185 | # print("sample_length: {} -> {} packages".format(sample_length, sample_packages_count)) 186 | samples = b'' 187 | for _ in range(sample_packages_count): 188 | response = self.__send_cmd(0xa6, parameter=[parameter], response_length=64, echo_expected=False) 189 | samples += response 190 | return samples[0:sample_length] 191 | 192 | def __send_a55a_command(self, attempts: int=20) -> None: 193 | for _ in range(attempts): 194 | response = self.__send_cmd(0xa5, parameter=[0x5a], response_length=1) 195 | assert response[0] in [0, 1, 2, 3] 196 | if response[0] in [2, 3]: 197 | return 198 | sleep(0.02) 199 | self.__send_ping() 200 | raise RuntimeError(f"a55a command failed, all {attempts} attempts were answered with 0 or 1.") 201 | 202 | def __send_set_time_div(self, ns_per_div: int = 500000) -> None: 203 | """send the a3 command to set the sample rate. 204 | only allows values that follow this pattern: (1|2|3){0}. eg. 10, 2000 or 5. 205 | Maximum is 200_000_000""" 206 | # assert isinstance(ns_per_div, int) 207 | # assert 0 < ns_per_div <= 200 * 1000 * 1000 # when the value is higher than 200ms/div, the scan mode must be used 208 | # assert int(str(ns_per_div)[1:]) == 0, "only first digit is allowed to be != 0" 209 | # assert int(str(ns_per_div)[0]) in [1, 2, 5], "first digit must be 1, 2 or 5" 210 | # time_per_div_id = {1: 0, 2: 1, 5: 2}[int(str(ns_per_div)[0])] + int(math.log10(ns_per_div)) * 3 211 | assert ns_per_div in self.__burst_mode_ns_per_div_to_id_dic, "The given ns_per_div is invalid" 212 | 213 | time_per_div_id = self.__burst_mode_ns_per_div_to_id_dic[ns_per_div] 214 | self.__send_cmd(0xa3, parameter=[time_per_div_id]) 215 | 216 | @staticmethod 217 | def _vertical_scale_id_to_factor(vs_id: int) -> float: 218 | assert 1 <= vs_id <= len(Hantek1008Raw.__VSCALE_FACTORS) 219 | return Hantek1008Raw.__VSCALE_FACTORS[vs_id - 1] 220 | 221 | @staticmethod 222 | def _vertical_scale_factor_to_id(vs_factor: float) -> int: 223 | assert vs_factor in Hantek1008Raw.__VSCALE_FACTORS 224 | return Hantek1008Raw.__VSCALE_FACTORS.index(vs_factor) + 1 225 | 226 | def __send_set_vertical_scale(self, scale_factors: List[float]) -> None: 227 | """send the a2 command to set the vertical sample scale factor per channel. 228 | Only following values are allowed: 1.0, 0.125, 0.02 [TODO: check] Volt/Div. 229 | scale_factor must be an array of length 8 with a float scale value for each channel. 230 | Or a single float, than all channel will have that scale factor""" 231 | assert all(x in Hantek1008Raw.__VSCALE_FACTORS for x in scale_factors) 232 | scale_factor_id: List[int] = [Hantek1008Raw._vertical_scale_factor_to_id(sf) for sf in scale_factors] 233 | self.__send_cmd(0xa2, parameter=scale_factor_id, sec_till_response_request=0.2132) 234 | 235 | def __send_set_active_channels(self, active_channels: List[int]) -> None: 236 | """ 237 | Activates only the channels thar are in the list 238 | :param active_channels: a list of the channels that should be active 239 | :return: 240 | """ 241 | assert active_channels is not None 242 | assert len(active_channels) > 0 243 | assert all(c in self.valid_channel_ids() for c in active_channels) 244 | assert len(set(active_channels)) == len(active_channels), "One channel must nut be more than once in the list" 245 | 246 | # set the count of active channels 247 | self.__send_cmd(0xa0, parameter=[len(active_channels)]) 248 | 249 | active_channels_byte_map = [(0x01 if i in active_channels else 0x00) 250 | for i in range(0, 8)] 251 | # what channels should be active? 252 | self.__send_cmd(0xaa, parameter=active_channels_byte_map) 253 | 254 | def __send_set_trigger(self, source_channel: int, slope: str) -> None: 255 | slope_map = {"rising": 0, "falling": 1} 256 | assert source_channel in self.valid_channel_ids() 257 | assert slope in slope_map, f"Only following slope types are allowed: {list(slope_map.keys())}" 258 | 259 | self.__send_cmd(0xc1, parameter=[source_channel, slope_map[slope]]) 260 | 261 | def __send_set_trigger_level(self, level: int) -> None: 262 | assert 0 <= level <= 2**12 263 | self.__send_cmd(0xab, parameter=int.to_bytes(level, length=2, byteorder="big", signed=False)) 264 | 265 | def __send_ping(self, sec_till_start: float=0) -> None: 266 | self.__send_cmd(0xf3, sec_till_start=sec_till_start) 267 | 268 | def init(self) -> None: 269 | self._init1() 270 | self._init2() 271 | self._init3() 272 | 273 | def _init1(self) -> None: 274 | """Initialize the device like the windows software does it""" 275 | self.__send_cmd(0xb0) 276 | sleep(0.7) # not sure if needed 277 | self.__send_cmd(0xb0) 278 | self.__send_ping() 279 | 280 | 281 | #self.__send_cmd(0xb9, parameter=bytes.fromhex("01 b0 04 00 00")) # 185 282 | #self.__send_cmd(0xb7, parameter=bytes.fromhex("00")) # 183 283 | #self.__send_cmd(0xbb, parameter=bytes.fromhex("08 00")) # 187 284 | self.set_generator_speed(300_000) 285 | self.set_generator_on(False) 286 | 287 | response = self.__send_cmd(0xb5, response_length=64, echo_expected=False, 288 | sec_till_response_request=0.0193) # 181 289 | # assert response == bytes.fromhex("00080008000800080008000800080008d407c907ef07cd07df07eb07c707d707" 290 | # "e107d207f007d807e607ed07d507e207f607e007f007e907f007ef07ea07f207") 291 | 292 | response = self.__send_cmd(0xb6, response_length=64, echo_expected=False) # 182 293 | # assert response == bytes.fromhex("04040404040404040404040404040404d200d500d800d400d400d500d200d200" 294 | # "9c009f009f009d009d009d009e009d00fd01fc01fc01fc01fb01fa01fd01fc01") 295 | 296 | response = self.__send_cmd(0xe5, response_length=2, echo_expected=False) 297 | # assert response == bytes.fromhex("d6 06") 298 | 299 | response = self.__send_cmd(0xf7, response_length=64, echo_expected=False) 300 | # assert response == bytes.fromhex("2cfd8ffb54fa2ef878007a007b00780079007a0079007800b801bf01c301ba01" 301 | # "bb01be01b701b801f90203030803fb02fc020003f502f80294ff92ff8fff93ff") 302 | 303 | response = self.__send_cmd(0xf8, response_length=64, echo_expected=False) 304 | # assert response == bytes.fromhex("92ff91ff96ff94ffc9fec4febdfec8fec7fec2fecffec9fe4cfe45fe3afe4afe" 305 | # "48fe42fe54fe4dfe70ff70ff71ff70ff71ff71ff72ff71ff7efe7bfe7afe7efe") 306 | 307 | response = self.__send_cmd(0xfa, response_length=56, echo_expected=False) 308 | # assert response == bytes.fromhex("7dfe7efe80fe7ffe90019401930192018f01900191018f0195029b0299029802" 309 | # "930294029702940290fd89fd84fd90fd8dfd8cfd94fd91fd") 310 | 311 | self.__send_cmd(0xf5, sec_till_response_request=0.2132) 312 | 313 | # self.__send_cmd(0xa0, parameter=bytes.fromhex("08")) 314 | # self.__send_cmd(0xaa, parameter=bytes.fromhex("0101010101010101")) 315 | # activate all 8 channels 316 | self.__send_set_active_channels(Hantek1008Raw.valid_channel_ids()) 317 | 318 | self.__send_set_time_div(500 * 1000) # 500us, the default value in the windows software 319 | 320 | self.__send_set_trigger(0, "rising") 321 | 322 | response = self.__send_cmd(0xa7, parameter=bytes.fromhex("0000"), response_length=1) 323 | assert response == bytes.fromhex("00") 324 | 325 | self.__send_cmd(0xac, parameter=bytes.fromhex("01f40009c50009c5")) 326 | 327 | def _init2(self) -> None: 328 | """get zero offsets for all channels and vscales""" 329 | self._zero_offsets = {} 330 | for vscale_id in range(1, 4): 331 | vscale = Hantek1008Raw._vertical_scale_id_to_factor(vscale_id) 332 | 333 | self.__send_ping() 334 | 335 | self.__send_set_vertical_scale([vscale] * Hantek1008Raw.channel_count()) 336 | 337 | self.__send_cmd(0xa4, parameter=[0x01]) 338 | 339 | self.__send_cmd(0xc0) 340 | 341 | sleep(0.0124) 342 | self.__send_cmd(0xc2) 343 | 344 | self.__send_a55a_command() 345 | 346 | samples2 = self.__send_c6_a6_command(0x02) 347 | samples3 = self.__send_c6_a6_command(0x03) 348 | samples = samples2 + samples3 349 | shorts = Hantek1008Raw.__from_bytes_to_shorts(samples) 350 | per_channel_data = Hantek1008Raw.__to_per_channel_lists(shorts, Hantek1008Raw.valid_channel_ids()) 351 | zero_offset_per_channel = [sum(per_channel_data[ch]) / float(len(per_channel_data[ch])) 352 | for ch in Hantek1008Raw.valid_channel_ids()] 353 | self._zero_offsets[vscale] = zero_offset_per_channel 354 | 355 | def _init3(self) -> None: 356 | self.__send_cmd(0xf6, sec_till_response_request=0.2132) 357 | 358 | response = self.__send_cmd(0xe5, echo_expected=False, response_length=2) 359 | assert response == bytes.fromhex("d606") 360 | 361 | response = self.__send_cmd(0xf7, echo_expected=False, response_length=64) 362 | assert response == bytes.fromhex("2cfd8ffb54fa2ef878007a007b00780079007a0079007800b801bf01c301ba01" 363 | "bb01be01b701b801f90203030803fb02fc020003f502f80294ff92ff8fff93ff") 364 | 365 | response = self.__send_cmd(0xf8, echo_expected=False, response_length=64) 366 | assert response == bytes.fromhex("92ff91ff96ff94ffc9fec4febdfec8fec7fec2fecffec9fe4cfe45fe3afe4afe" 367 | "48fe42fe54fe4dfe70ff70ff71ff70ff71ff71ff72ff71ff7efe7bfe7afe7efe") 368 | 369 | response = self.__send_cmd(0xfa, echo_expected=False, response_length=56) 370 | assert response == bytes.fromhex("7dfe7efe80fe7ffe90019401930192018f01900191018f0195029b0299029802" 371 | "930294029702940290fd89fd84fd90fd8dfd8cfd94fd91fd") 372 | 373 | self.__send_set_time_div(self.__ns_per_div) 374 | 375 | self.__send_cmd(0xac, parameter=bytes.fromhex("00c80002bd0002bd")) 376 | 377 | self.__send_cmd(0xe4, parameter=[0x01]) 378 | 379 | self.__send_cmd(0xe6, parameter=[0x01], echo_expected=False, response_length=10) 380 | # assert response == bytes.fromhex("eb06e606e606e706e706") 381 | 382 | self.__send_ping() 383 | 384 | self.__send_set_active_channels(self.__active_channels) 385 | 386 | self.__send_set_vertical_scale(self.__vertical_scale_factors) 387 | 388 | self.__send_set_time_div(self.__ns_per_div) 389 | 390 | self.__send_set_trigger(self.__trigger_channel, self.__trigger_slope) 391 | 392 | response = self.__send_cmd(0xa7, parameter=[0x00, 0x00], response_length=1) 393 | assert response == bytes.fromhex("00") 394 | 395 | self.__send_cmd(0xac, parameter=bytes.fromhex("0000000001000579")) 396 | 397 | self.__send_set_trigger_level(self.__trigger_level) 398 | 399 | response = self.__send_cmd(0xe9, echo_expected=False, response_length=2) 400 | assert response == bytes.fromhex("0109") 401 | 402 | def request_samples_burst_mode(self) -> Dict[int, List[int]]: 403 | """get the data""" 404 | 405 | self.__send_ping() 406 | 407 | # these two commands are not necessarily required 408 | self.__send_cmd(0xe4, parameter=[0x01]) 409 | self.__send_cmd(0xe6, parameter=[0x01], echo_expected=False, response_length=10) 410 | # response ~ e906e506e406e406e506 411 | 412 | self.__send_cmd(0xa4, parameter=[0x01], sec_till_response_request=0.015) 413 | 414 | self.__send_cmd(0xc0) 415 | 416 | self.__send_cmd(0xc2) 417 | 418 | self.__send_a55a_command() 419 | 420 | sample_response = self.__send_c6_a6_command(0x02) 421 | sample_response += self.__send_c6_a6_command(0x03) 422 | 423 | # these two commands are not necessarily required 424 | self.__send_cmd(0xe4, parameter=[0x01]) 425 | self.__send_cmd(0xe6, parameter=[0x01], echo_expected=False, response_length=10) 426 | # response ~ e806e406e506e406e406 427 | 428 | sample_shorts = Hantek1008Raw.__from_bytes_to_shorts(sample_response) 429 | 430 | per_channel_data = Hantek1008Raw.__to_per_channel_lists(sample_shorts, self.__active_channels) 431 | return per_channel_data 432 | 433 | @staticmethod 434 | def channel_count() -> int: 435 | return 8 436 | 437 | @staticmethod 438 | def valid_channel_ids() -> List[int]: 439 | return list(range(0, Hantek1008Raw.channel_count())) 440 | 441 | @staticmethod 442 | def valid_roll_mode_sampling_rates() -> List[float]: 443 | return copy.deepcopy(list(Hantek1008Raw.__roll_mode_sampling_rate_to_id_dic.keys())) 444 | 445 | @staticmethod 446 | def valid_burst_mode_ns_per_divs() -> List[float]: 447 | return copy.deepcopy(list(Hantek1008Raw.__burst_mode_ns_per_div_to_id_dic.keys())) 448 | 449 | @staticmethod 450 | def valid_vscale_factors() -> List[float]: 451 | return copy.deepcopy(Hantek1008Raw.__VSCALE_FACTORS) 452 | 453 | @staticmethod 454 | def actual_sampling_rate_factor(active_channel_count: int) -> float: 455 | """ 456 | If not all channels are used the actual sampling rate is higher than the 457 | given sampling rate. The factor describe how much higher it is, depending on the amount 458 | of active channels. 459 | :return: 460 | """ 461 | assert 1 <= active_channel_count <= Hantek1008Raw.channel_count() 462 | return [4.56, 3.03, 2.27, 1.82, 1.51, 1.3, 1.14, 1.00][active_channel_count-1] 463 | 464 | def request_samples_roll_mode_single_row(self, **argv) \ 465 | -> Generator[Dict[int, int], None, None]: 466 | for per_channel_data in self.request_samples_roll_mode(**argv): 467 | for row in list(zip(*per_channel_data.values())): 468 | yield dict(zip(per_channel_data.keys(), row)) 469 | 470 | def request_samples_roll_mode(self, sampling_rate: int = 440) \ 471 | -> Generator[Dict[int, List[int]], None, None]: 472 | 473 | assert sampling_rate in Hantek1008Raw.__roll_mode_sampling_rate_to_id_dic, \ 474 | f"sample_rate must be in {Hantek1008Raw.__roll_mode_sampling_rate_to_id_dic.keys()}" 475 | 476 | try: 477 | # sets the sample rate: 18 -> 440 samples/sec/channel 478 | sample_rate_id = Hantek1008Raw.__roll_mode_sampling_rate_to_id_dic[sampling_rate] 479 | self.__send_cmd(0xa3, parameter=[sample_rate_id]) 480 | 481 | self.__send_ping(sec_till_start=0.0100) 482 | 483 | self.__send_cmd(0xa4, parameter=[0x02]) 484 | 485 | # pipe error if a3 cmd/__send_set_time_div was not with parameter 1a/ 486 | self.__send_cmd(0xc0) 487 | 488 | self.__send_cmd(0xc2) 489 | 490 | while True: 491 | ready_data_length = 0 492 | while ready_data_length == 0: 493 | self.__send_ping() 494 | 495 | response = self.__send_cmd(0xc7, response_length=2, echo_expected=False) 496 | ready_data_length = int.from_bytes(response, byteorder="big", signed=False) 497 | # ready_data_length = 498 | # (active_channels + ONE_MYSTIC_EXTRA_CHANNEL) * TWO_BYTES_PER_SAMPLE * row_count 499 | assert ready_data_length % ((len(self.__active_channels) + 1)*2) == 0 500 | 501 | sample_response = b'' 502 | while ready_data_length > 0: 503 | sample_response_part = self.__send_cmd(0xc8, response_length=64, echo_expected=False) 504 | 505 | if ready_data_length < 64: 506 | # remove zeros at the end 507 | sample_response_part = sample_response_part[0:ready_data_length] 508 | 509 | ready_data_length -= 64 510 | sample_response += sample_response_part 511 | 512 | sample_shorts = Hantek1008Raw.__from_bytes_to_shorts(sample_response) 513 | # in rolling mode there is an additional 9th channel, with values around 1742 514 | # this channel will not be past to the caller 515 | per_channel_data = self.__to_per_channel_lists(sample_shorts, self.__active_channels, 516 | expect_ninth_channel=True) 517 | yield per_channel_data 518 | except GeneratorExit: 519 | # TODO: auto start pause tread? 520 | pass 521 | 522 | def get_zero_offsets(self) -> Optional[Dict[float, List[float]]]: 523 | return copy.deepcopy(self._zero_offsets) 524 | 525 | def get_zero_offset(self, channel_id: int, vscale: Optional[float] = None) -> Optional[float]: 526 | assert channel_id in Hantek1008Raw.valid_channel_ids() 527 | assert vscale is None or vscale in Hantek1008Raw.valid_vscale_factors() 528 | 529 | # if this methode is called before init/connect zero_offset will be null 530 | if self._zero_offsets is None: 531 | return None 532 | 533 | if vscale is None: 534 | vscale = self.get_vscale(channel_id) 535 | 536 | return self._zero_offsets[vscale][channel_id] 537 | 538 | @staticmethod 539 | def get_generator_waveform_max_length() -> int: 540 | return 1440 541 | 542 | def set_generator_on(self, turn_on: bool) -> None: 543 | # TODO not tested 544 | self.__send_cmd(0xb7, parameter=[0x00]) 545 | 546 | self.__send_cmd(0xbb, parameter=[0x08, 0x01 if turn_on else 0x00]) 547 | 548 | def set_generator_speed(self, speed_in_rpm: int) -> None: 549 | # TODO speed_in_rpm must be round to valid values, dont know how 550 | def compute_pulse_length(speed_in_rpm: int, bits_per_wave: int = 8) -> int: 551 | assert 1 <= speed_in_rpm <= 750_000 552 | assert 1 <= bits_per_wave <= Hantek1008Raw.get_generator_waveform_max_length() 553 | # TODO values great then 750_000 are possible too, but then the decoding changes (firt paramter gets a 02) 554 | # and this other decoding is not completely understood 555 | return int(((8 * 360_000_000) / bits_per_wave) / speed_in_rpm) 556 | 557 | assert compute_pulse_length(300_000) == 1200 558 | 559 | pulse_length = compute_pulse_length(speed_in_rpm) 560 | parameter = bytes.fromhex("01") + pulse_length.to_bytes(length=4, byteorder='little', signed=False) 561 | assert len(parameter) == 1 + 4 562 | self.__send_cmd(0xb9, parameter=parameter) 563 | 564 | def set_generator_waveform(self, waveform: List[int]) -> None: 565 | """ 566 | Every Byte in the waveform list contains information for every of the 8 digital ouputs to be on or of. 567 | The bit number i in one of those bytes tells if output i should be on or off in that part of the wave. 568 | :param waveform: 569 | :return: 570 | """ 571 | # TODO not tested 572 | # example for waveform: F0 0F F0 0F 573 | # -> switches the output of every channel at every pulse 574 | # ch1 to ch4 start with down, ch5 to ch8 start up 575 | assert len(waveform) <= Hantek1008Raw.get_generator_waveform_max_length() 576 | assert len(waveform) <= 62, "Currently not supported" 577 | assert all(b <= 0b1111_1111 for b in waveform) 578 | 579 | self.__send_cmd(0xb7, parameter=[0x00]) 580 | 581 | # send the length of the waveform in bytes 582 | self.__send_cmd(0xbf, parameter=int.to_bytes(len(waveform), length=2, byteorder="little", signed=False)) 583 | 584 | zeros = [0] * (62 - len(waveform)) 585 | self.__send_cmd(0xb8, parameter=[0x01] + waveform + zeros) 586 | 587 | def __loop_f3(self) -> None: 588 | log.debug("start pause thread") 589 | while not self.__cancel_pause_thread: 590 | self.__send_ping() 591 | sleep(0.01) 592 | log.debug("stop pause thread") 593 | 594 | def pause(self) -> None: 595 | if self.is_paused(): 596 | raise RuntimeError("Can't pause because device is already pausing") 597 | self.__cancel_pause_thread = False 598 | self.__pause_thread = Thread(target=self.__loop_f3) 599 | self.__pause_thread.start() 600 | 601 | def cancel_pause(self) -> None: 602 | if not self.is_paused(): 603 | raise RuntimeError("Can't cancel pause because device is not paused") 604 | assert self.__pause_thread is not None 605 | self.__cancel_pause_thread = True 606 | self.__pause_thread.join() 607 | self.__pause_thread = None 608 | 609 | def is_paused(self) -> bool: 610 | return self.__pause_thread is not None 611 | 612 | def close(self) -> None: 613 | if self.is_paused(): 614 | self.cancel_pause() 615 | 616 | # read maybe leftover data 617 | self.__clear_leftover() 618 | self.__send_ping() 619 | self.__send_cmd(0xf4) 620 | self._dev.reset() 621 | 622 | def __clear_leftover(self) -> None: 623 | """ 624 | If a __send_cmd was canceled after the write but before the read, the Hantek device 625 | still wants to send the answer. This method will try to read such a leftover answer 626 | if there is there is one 627 | :return: 628 | """ 629 | try: 630 | response = bytes(self.__in.read(64, timeout=100)) 631 | except usb.core.USBError: 632 | log.debug("no left over data") 633 | pass 634 | else: 635 | log.debug(f"left over data: {response.hex()}") 636 | 637 | def get_vscales(self) -> List[float]: 638 | return copy.deepcopy(list(self.__vertical_scale_factors)) 639 | 640 | def get_vscale(self, channel_id: int) -> float: 641 | assert channel_id in Hantek1008Raw.valid_channel_ids() 642 | return self.__vertical_scale_factors[channel_id] 643 | 644 | def get_active_channels(self) -> List[int]: 645 | return copy.deepcopy(self.__active_channels) 646 | 647 | @staticmethod 648 | def __from_bytes_to_shorts(data: bytes) -> List[int]: 649 | """Take two following bytes to build a integer (using little endianess) """ 650 | assert len(data) % 2 == 0 651 | return [data[i] + data[i + 1] * 256 for i in range(0, len(data), 2)] 652 | 653 | @staticmethod 654 | def __to_per_channel_lists(shorts: List[int], active_channels: List[int], expect_ninth_channel: bool = False 655 | ) -> Dict[int, List[int]]: 656 | """Create a dictionary (of the size of 'channel_count') of lists, 657 | where the dictionary at key x contains the data for channel x+1 of the hantek device. 658 | In rolling mode there is an additional 9th channel, with values around 1742 this 659 | channel will not be past to the caller. 660 | """ 661 | active_channels = sorted(active_channels) 662 | active_channel_count = len(active_channels) 663 | real_channel_count = active_channel_count 664 | if expect_ninth_channel: 665 | real_channel_count += 1 666 | return {active_channels[i]: shorts[i::real_channel_count] 667 | for i in range(0, active_channel_count)} 668 | 669 | 670 | """ 671 | Below goes stuff that is needed for more advanced features 672 | """ 673 | 674 | # list of dicts of lists of dicts 675 | # usecase: __correction_data[channel_id][vscale][..] = {"units":..., "factor": ...} 676 | CorrectionDataType = List[Dict[float, Dict[float, float]]] 677 | 678 | # a function that awaits an channel id [0,7], vscale and a deltatime (time in sec since creation of this class) 679 | # it computes a correction factor that can be applied (added) to the normal zero_offset 680 | ZeroOffsetShiftCompensationFunctionType = Callable[[int, float, float], float] 681 | 682 | 683 | class Hantek1008(Hantek1008Raw): 684 | """ 685 | A more advanced version of Hantek1008Raw. It features raw values to voltage conversion 686 | , usage of external generated calibration data and zero offset shift calibration compensation. 687 | """ 688 | 689 | def __init__(self, ns_per_div: int = 500_000, 690 | vertical_scale_factor: Union[float, List[float]] = 1.0, 691 | active_channels: Optional[List[int]] = None, 692 | correction_data: Optional[CorrectionDataType] = None, 693 | zero_offset_shift_compensation_channel: Optional[int] = None, 694 | zero_offset_shift_compensation_function: Optional[ZeroOffsetShiftCompensationFunctionType] = None, 695 | zero_offset_shift_compensation_function_time_offset_sec: int = 0) -> None: 696 | 697 | if active_channels is None: 698 | active_channels = Hantek1008Raw.valid_channel_ids() 699 | if correction_data is None: 700 | correction_data = [{} for _ in range(Hantek1008Raw.channel_count())] 701 | 702 | assert len(correction_data) == Hantek1008Raw.channel_count() 703 | assert all(isinstance(x, dict) for x in correction_data) 704 | 705 | assert zero_offset_shift_compensation_channel is None or zero_offset_shift_compensation_function is None 706 | if zero_offset_shift_compensation_channel is not None: 707 | assert zero_offset_shift_compensation_channel not in active_channels 708 | assert zero_offset_shift_compensation_channel in Hantek1008Raw.valid_channel_ids() 709 | assert zero_offset_shift_compensation_channel not in active_channels 710 | active_channels = active_channels + [zero_offset_shift_compensation_channel] 711 | 712 | Hantek1008Raw.__init__(self, ns_per_div, vertical_scale_factor, active_channels) 713 | 714 | self.__correction_data: CorrectionDataType = copy.deepcopy(correction_data) 715 | 716 | self.__zero_offset_shift_compensation_channel: Optional[int] = zero_offset_shift_compensation_channel 717 | self.__zero_offset_shift_compensation_value: float = 0.0 718 | 719 | self.__zero_offset_shift_compensation_function: Optional[ZeroOffsetShiftCompensationFunctionType] \ 720 | = zero_offset_shift_compensation_function 721 | self.__start_monotonic_time = time.monotonic() - zero_offset_shift_compensation_function_time_offset_sec 722 | 723 | def get_used_zero_offsets_shift_compensation_method(self)-> Optional[str]: 724 | assert not (self.__zero_offset_shift_compensation_channel 725 | and self.__zero_offset_shift_compensation_function) 726 | if self.__zero_offset_shift_compensation_channel: 727 | return f"channel {self.__zero_offset_shift_compensation_channel}" 728 | if self.__zero_offset_shift_compensation_function: 729 | return f"function {self.__zero_offset_shift_compensation_function}" 730 | return None 731 | 732 | def __update_zero_offset_compensation_value(self, zero_readings: List[int]) -> None: 733 | # TODO problem zero offset different on different vscales? 734 | assert self.__zero_offset_shift_compensation_channel is not None 735 | assert self._zero_offsets is not None 736 | zoscc_vscale = Hantek1008Raw.get_vscale(self, self.__zero_offset_shift_compensation_channel) 737 | assert zoscc_vscale == 1.0 # is this really necessary? 738 | zoscc_zero_offset = self._zero_offsets[zoscc_vscale][self.__zero_offset_shift_compensation_channel] 739 | 740 | adaption_factor = 0.00002 # [0,1] 741 | for v in zero_readings: 742 | # print("v", v, "zo", zoscc_zero_offset) 743 | delta = v - zoscc_zero_offset 744 | self.__zero_offset_shift_compensation_value = \ 745 | (1.0 - adaption_factor) * self.__zero_offset_shift_compensation_value \ 746 | + adaption_factor * delta 747 | log.debug("zosc-value", self.__zero_offset_shift_compensation_value) 748 | 749 | @overrides 750 | def get_zero_offset(self, channel_id: int, vscale: Optional[float] = None) -> float: 751 | if vscale is None: 752 | vscale = Hantek1008Raw.get_vscale(self, channel_id) 753 | 754 | zero_offset = Hantek1008Raw.get_zero_offset(self, channel_id, vscale) 755 | assert zero_offset is not None 756 | if self.__zero_offset_shift_compensation_channel is not None: 757 | zero_offset += self.__zero_offset_shift_compensation_value 758 | if self.__zero_offset_shift_compensation_function is not None: 759 | delta_sec = time.monotonic() - self.__start_monotonic_time 760 | zero_offset += self.__zero_offset_shift_compensation_function(channel_id, vscale, delta_sec) 761 | return zero_offset 762 | 763 | @overrides(check_signature=False) 764 | def request_samples_roll_mode_single_row(self, **argv)\ 765 | -> Generator[Dict[int, float], None, None]: 766 | for per_channel_data in self.request_samples_roll_mode(**argv): 767 | for row in list(zip(*per_channel_data.values())): 768 | yield dict(zip(per_channel_data.keys(), row)) 769 | 770 | @overrides(check_signature=False) 771 | def request_samples_roll_mode(self, sampling_rate: int = 440, mode: str = "volt") \ 772 | -> Generator[Dict[int, Union[List[float], List[int]]], None, None]: 773 | 774 | assert mode in ["volt", "raw", "volt+raw"] 775 | active_channel_count = len(Hantek1008Raw.get_active_channels(self)) 776 | 777 | for raw_per_channel_data in Hantek1008Raw.request_samples_roll_mode(self, sampling_rate): 778 | assert len(raw_per_channel_data) == active_channel_count 779 | yield self.__process_raw_per_channel_data(raw_per_channel_data, mode) 780 | 781 | def __remove_zosc_channel_data(self, per_channel_data: Dict[int, Union[List[int], List[float]]]) -> None: 782 | if self.__zero_offset_shift_compensation_channel is not None: 783 | if self.__zero_offset_shift_compensation_channel in per_channel_data: 784 | del per_channel_data[self.__zero_offset_shift_compensation_channel] 785 | if self.__zero_offset_shift_compensation_channel + Hantek1008Raw.channel_count() in per_channel_data: 786 | del per_channel_data[self.__zero_offset_shift_compensation_channel] 787 | 788 | def __extract_channel_volts(self, per_channel_data: Dict[int, List[int]]) -> Dict[int, List[float]]: 789 | """Extract the voltage values from the raw byte array that came from the device""" 790 | if self.__zero_offset_shift_compensation_channel is not None: 791 | self.__update_zero_offset_compensation_value( 792 | per_channel_data[self.__zero_offset_shift_compensation_channel]) 793 | return {ch: self.__raw_to_volt(channel_data, ch) for ch, channel_data in per_channel_data.items()} 794 | 795 | def __raw_to_volt(self, raw_values: List[int], channel_id: int) -> List[float]: 796 | """Convert the raw shorts to useful volt values""" 797 | vscale = 1.0 798 | zero_offset = 2048 799 | 800 | if channel_id < Hantek1008Raw.channel_count(): 801 | vscale = Hantek1008Raw.get_vscale(self, channel_id) 802 | # get right zero offset for that channel and the used vertical scale factor (vscale) 803 | zero_offset = self.get_zero_offset(channel_id, vscale) 804 | 805 | scale = 0.01 * vscale 806 | 807 | # accuracy = -int(math.log10(scale)) + 2 # amount of digits after the dot that is not nearly random 808 | accuracy = [3, 4, 5][Hantek1008Raw._vertical_scale_factor_to_id(vscale) - 1] 809 | return [round( 810 | self.__calc_correction_factor(v - zero_offset, channel_id, vscale) * (v - zero_offset) * scale 811 | , ndigits=accuracy) 812 | for v in raw_values] 813 | 814 | def __calc_correction_factor(self, delta_to_zero: float, channel_id: int, vscale: float) -> float: 815 | """ 816 | Compute a correction factor based on the given calibration data. 817 | Always returns 1.0 if no calibration data for the requested channel or at all is available. 818 | :param delta_to_zero: 819 | :param channel_id: 820 | :param vscale: 821 | :return: 822 | """ 823 | if channel_id not in Hantek1008Raw.valid_channel_ids() \ 824 | or vscale not in self.__correction_data[channel_id]: 825 | return 1.0 826 | 827 | channel_cd = self.__correction_data[channel_id][vscale] 828 | 829 | if len(channel_cd) == 0: 830 | return 1.0 831 | 832 | if len(channel_cd) == 1: 833 | return channel_cd[0] 834 | 835 | units_less, cfactor_less = max(((key, value) 836 | for key, value 837 | in channel_cd.items() 838 | if key <= delta_to_zero), default=(None, None)) 839 | units_greater, cfactor_greater = min(((key, value) 840 | for key, value 841 | in channel_cd.items() 842 | if key >= delta_to_zero), default=(None, None)) 843 | assert units_less is not None or units_greater is not None 844 | if units_less is None: 845 | return cfactor_less 846 | if units_greater is None: 847 | return cfactor_greater 848 | 849 | alpha = (delta_to_zero - units_less) / (units_greater - units_less) 850 | return (1.0 - alpha) * cfactor_less + alpha * cfactor_greater 851 | 852 | def __process_raw_per_channel_data(self, raw_per_channel_data: Dict[int, List[int]], mode: str 853 | ) -> Dict[int, Union[List[int], List[float]]]: 854 | assert mode in ["raw", "volt", "volt+raw"] 855 | result: Dict[int, Union[List[float], List[int]]] = {} 856 | if "volt" in mode: 857 | result.update(self.__extract_channel_volts(raw_per_channel_data)) 858 | if "raw" in mode: 859 | raw_channel_offset = Hantek1008Raw.channel_count() if mode == "volt+raw" else 0 860 | result.update({ch + raw_channel_offset: values 861 | for ch, values in raw_per_channel_data.items()}) 862 | self.__remove_zosc_channel_data(result) 863 | return result 864 | 865 | @overrides(check_signature=False) 866 | def request_samples_burst_mode(self, mode: str = "volt" 867 | ) -> Dict[int, Union[List[int], List[float]]]: 868 | assert self.__zero_offset_shift_compensation_channel is None, \ 869 | "zero offset shift compensation is not implemented for burst mode" 870 | raw_per_channel_data = Hantek1008Raw.request_samples_burst_mode(self) 871 | return self.__process_raw_per_channel_data(raw_per_channel_data, mode) 872 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mfg92/hantek1008py/302e45ad9d47dafec8a57c36aaca0b76647b221d/utils/__init__.py -------------------------------------------------------------------------------- /utils/common.py: -------------------------------------------------------------------------------- 1 | import re 2 | from typing import List, Callable, Tuple, IO, TextIO, Optional 3 | import lzma 4 | import time 5 | import math 6 | from abc import ABCMeta, abstractmethod 7 | import argparse 8 | 9 | measured_sampling_rate_regex = re.compile(r"(# measured samplingrate:)\s+(\d*\.?\d+)\s*(hz)", 10 | re.IGNORECASE) 11 | sampling_rate_regex = re.compile(r"(# samplingrate:)\s+(\d*\.?\d+)\s*(hz)", re.IGNORECASE) 12 | unix_time_regex = re.compile(r"(# unix-time:)\s+(\d*\.?\d+)\s*", re.IGNORECASE) 13 | 14 | 15 | def csv_file_type(file_path: str) -> TextIO: 16 | """for use in argparse as type=""" 17 | try: 18 | return open_csv_file(file_path) 19 | except: 20 | raise argparse.ArgumentTypeError(f"There is no file '{file_path}' or can not open it.") 21 | 22 | 23 | def parse_csv_lines(lines: List[str]) \ 24 | -> Tuple[List[float], List[float], List[float], List[List[float]]]: 25 | measured_sampling_rate = [float(measured_sampling_rate_regex.search(line).group(2)) 26 | for line in lines 27 | if line[0] == "#" and measured_sampling_rate_regex.search(line)] 28 | 29 | sampling_rate = [float(sampling_rate_regex.search(line).group(2)) for line in lines 30 | if line[0] == "#" and sampling_rate_regex.search(line)] 31 | 32 | unix_time = [float(unix_time_regex.search(line).group(2)) for line in lines 33 | if line[0] == "#" and unix_time_regex.search(line)] 34 | 35 | values = [[float(v) for v in line.split(",")] 36 | for line in lines 37 | if line[0] != "#"] 38 | 39 | per_channel_data = list(zip(*values)) 40 | 41 | return sampling_rate, measured_sampling_rate, unix_time, per_channel_data 42 | 43 | 44 | def open_csv_file(file_name: str, mode: str="rt") -> IO: 45 | open_function = lzma.open if file_name.endswith(".xz") else open 46 | return open_function(file_name, mode=mode) 47 | 48 | 49 | def read_csv_file(file_name: str) -> List[str]: 50 | with open_csv_file(file_name) as f: 51 | return f.readlines() 52 | 53 | 54 | def parse_csv_file(file_name: str) \ 55 | -> Tuple[List[float], List[float], List[float], List[List[float]]]: 56 | """ 57 | Parse a file chunk for chunk: reading up to chunk_size bytes, parse them 58 | and finally merge all data of all pared chunks together 59 | :param file_name: 60 | :return: 61 | """ 62 | sampling_rate: List[float] = [] 63 | measured_sampling_rate: List[float] = [] 64 | unix_time: List[float] = [] 65 | per_channel_data: Optional[List[List[float]]] = None 66 | 67 | def on_parse_func(sampling_rate_part, measured_sampling_rate_part, unix_time_part, per_channel_data_part) -> None: 68 | nonlocal sampling_rate, measured_sampling_rate, unix_time, per_channel_data 69 | 70 | only_comments = False if per_channel_data_part else True 71 | 72 | if per_channel_data is None and not only_comments: 73 | per_channel_data = [[] for _ in range(len(per_channel_data_part))] 74 | 75 | sampling_rate.extend(sampling_rate_part) 76 | measured_sampling_rate.extend(measured_sampling_rate_part) 77 | unix_time.extend(unix_time_part) 78 | if not only_comments: 79 | assert len(per_channel_data) == len(per_channel_data_part) 80 | for index in range(len(per_channel_data_part)): 81 | per_channel_data[index].extend(per_channel_data_part[index]) 82 | 83 | parse_csv_file_chunked(file_name, on_parse_func) 84 | return sampling_rate, measured_sampling_rate, unix_time, per_channel_data 85 | 86 | 87 | def parse_csv_file_chunked(file_name: str, on_parse_func: Callable[[List[float], List[float], List[float], List[List[float]]], None], 88 | chunk_size: int = 2**10): 89 | with open_csv_file(file_name) as f: 90 | while True: 91 | lines_part = f.readlines(chunk_size) 92 | 93 | if len(lines_part) == 0: 94 | break 95 | 96 | sampling_rate_part, measured_sampling_rate_part, unix_time_part, per_channel_data_part = parse_csv_lines(lines_part) 97 | 98 | on_parse_func(sampling_rate_part, measured_sampling_rate_part, unix_time_part, per_channel_data_part) 99 | 100 | 101 | class FileChangeReader: 102 | def __init__(self, file_path: str, ignore_existing_file_content: bool = True) -> None: 103 | self.__file_path: str = file_path 104 | self.__stream_position: int = 0 105 | 106 | if ignore_existing_file_content: 107 | with self.__open()(self.__file_path, "r") as file: 108 | file.seek(0, 2) # jump to the end 109 | self.__stream_position = file.tell() 110 | 111 | def __open(self): 112 | return open if not self.__file_path.endswith(".xz") else lzma.open 113 | 114 | def read_changed_lines(self) -> List[str]: 115 | with self.__open()(self.__file_path, "r") as file: 116 | file.seek(0, 2) 117 | if file.tell() == self.__stream_position: 118 | return [] # file size did not change 119 | 120 | file.seek(self.__stream_position, 0) 121 | lines = file.readlines() 122 | self.__stream_position = file.tell() 123 | return lines 124 | 125 | 126 | class ChannelDataUpdater(metaclass=ABCMeta): 127 | @abstractmethod 128 | def get_channel_data(self, channel_id: int) -> List[float]: 129 | return [] 130 | 131 | @abstractmethod 132 | def update(self): 133 | pass 134 | 135 | 136 | class CsvChannelDataUpdater(ChannelDataUpdater): 137 | def __init__(self, file: FileChangeReader, buffer_size: int) -> None: 138 | self.__file: FileChangeReader = file 139 | self.__buffer_size: int = buffer_size 140 | self.__channel_data: List[List[float]] = [[] for _ in range(8)] 141 | self.__sampling_rate: float = None 142 | 143 | def get_channel_data(self, channel_id: int) -> List[float]: 144 | return self.__channel_data[channel_id] 145 | 146 | def update(self): 147 | lines = self.__file.read_changed_lines() 148 | if not lines: 149 | return 150 | 151 | sampling_rate, measured_sampling_rate, per_channel_data = parse_csv_lines(lines) 152 | 153 | self.__sampling_rate = [self.__sampling_rate, *sampling_rate, *measured_sampling_rate][-1] 154 | 155 | for channel_id in range(8): 156 | if channel_id < len(per_channel_data): 157 | self.__channel_data[channel_id].extend(per_channel_data[channel_id]) 158 | # trim to self.__channel_data_max_len 159 | del self.__channel_data[channel_id][:-self.__buffer_size] 160 | 161 | 162 | class DemoChannelDataUpdater(ChannelDataUpdater): 163 | def __init__(self, sampling_rate: int, buffer_size: int) -> None: 164 | self.__sampling_rate: int = sampling_rate 165 | self.__buffer_size: int = buffer_size 166 | self.__channel_data: List[List[float]] = [[] for _ in range(8)] 167 | self.__time_of_last_update: float = time.time() 168 | 169 | def get_channel_data(self, channel_id: int) -> List[float]: 170 | return self.__channel_data[channel_id] 171 | 172 | def update(self): 173 | now = time.time() 174 | delta = now - self.__time_of_last_update # time passed since last demo calculations 175 | for channel_id in range(8): 176 | for i in range(round(delta * self.__sampling_rate)): 177 | t = self.__time_of_last_update + (i / self.__sampling_rate) 178 | amplitude = 0.2 + 1.8 * (channel_id / 8) 179 | # x_scale = 3 + 12.987 * (1 / 8) * channel_id 180 | x_scale = 10 181 | # x_offset = channel_id * 42.123 182 | x_offset = 0 183 | if channel_id == 7: 184 | x_offset = 0.5/8 185 | y = amplitude * math.sin((x_offset + t*x_scale) * 2*math.pi) 186 | if channel_id == 0: 187 | # y += amplitude*0.5 * math.sin(x_offset + t * math.pi * x_scale*3) 188 | # y += amplitude*0.5 * math.sin((x_offset+0.5 + t*x_scale*2) * math.pi) 189 | y += amplitude*0.5 * math.sin((x_offset+0 + t*x_scale*2) * 2*math.pi) 190 | 191 | self.__channel_data[channel_id].append(y) 192 | # trim to self.__channel_data_max_len 193 | del self.__channel_data[channel_id][:-self.__buffer_size] 194 | self.__time_of_last_update = now 195 | 196 | 197 | class EmptyChannelDataUpdater(ChannelDataUpdater): 198 | def get_channel_data(self, channel_id: int) -> List[float]: 199 | return [] 200 | 201 | def update(self): 202 | pass 203 | -------------------------------------------------------------------------------- /utils/csvwriter.py: -------------------------------------------------------------------------------- 1 | from typing import List, Any, Sequence, Callable, IO 2 | import threading 3 | import queue 4 | import csv 5 | 6 | # marking a child classes method with overrides makes sure the method overrides a parent class method 7 | # this check is only needed during development so its no problem if this package is not installed 8 | # to avoid errors we need to define a dummy decorator 9 | try: 10 | from overrides import overrides 11 | except ImportError: 12 | def overrides(method: Callable) -> Callable: 13 | return method 14 | 15 | 16 | class CsvWriter: 17 | 18 | def __init__(self, file: IO[str], delimiter: str) -> None: 19 | self.__csv_file = file 20 | self.__csv_writer = csv.writer(file, delimiter=delimiter) 21 | 22 | def write_comment(self, comment: str) -> None: 23 | self.__csv_file.write(f"# {comment}\n") 24 | 25 | def write_row(self, row: Sequence[Any]) -> None: 26 | self.__csv_writer.writerow(row) 27 | 28 | def write_rows(self, rows: Sequence[Sequence[Any]]) -> None: 29 | self.__csv_writer.writerows(rows) 30 | 31 | def close(self) -> None: 32 | self.__csv_file.close() 33 | 34 | 35 | class ThreadedCsvWriter(CsvWriter): 36 | """ 37 | Writes content to a csv file using an extra thread 38 | """ 39 | 40 | def __init__(self, file: IO[str], delimiter: str) -> None: 41 | super().__init__(file, delimiter) 42 | self.__closed: bool = False 43 | self.__work_queue: queue.Queue = queue.Queue() # a thread-safe FIFO queue 44 | self.__work_thread = threading.Thread(target=self.__do_work) 45 | self.__work_thread.start() 46 | 47 | @overrides 48 | def write_comment(self, comment: str) -> None: 49 | self.__enqueue_work(super().write_comment, comment) 50 | 51 | @overrides 52 | def write_row(self, row: Sequence[Any]) -> None: 53 | self.__enqueue_work(super().write_row, row) 54 | 55 | @overrides 56 | def write_rows(self, rows: Sequence[Sequence[Any]]) -> None: 57 | self.__enqueue_work(super().write_rows, rows) 58 | 59 | def __enqueue_work(self, func: Callable, *params: Any) -> None: 60 | self.__work_queue.put((func, params)) 61 | 62 | def __do_work(self) -> None: 63 | while not self.__closed: 64 | func, params = self.__work_queue.get() 65 | func(*params) 66 | 67 | def close(self) -> None: 68 | def stop() -> None: 69 | self.__closed = True 70 | # super without arguments does not work here inside a locally defined function 71 | super(ThreadedCsvWriter, self).close() 72 | self.__enqueue_work(stop) 73 | self.__work_thread.join() 74 | -------------------------------------------------------------------------------- /utils/electro.py: -------------------------------------------------------------------------------- 1 | from typing import List, Union, Tuple, Optional 2 | from math import sqrt, log 3 | import numpy as np 4 | 5 | """ 6 | EeMt: Einführung in die elektrische Messtechnik, 3. Auflage 7 | """ 8 | 9 | 10 | def rms(data: Union[List[float], np.ndarray]) -> float: 11 | """ 12 | Calculates the 'Root Mean Square' (Effektivwert) 13 | EeMt 5.2.3 P. 109 14 | :param data: 15 | :return: 16 | """ 17 | return np.sqrt(np.mean(np.array(data)**2)) 18 | # return sqrt(sum(v*v for v in data)/len(data)) 19 | 20 | 21 | def interpolate(samples: Union[List[float], np.ndarray], index: int, mode: str) -> float: 22 | """ 23 | This is no normal interpolation (find a value between two sample points) instead this finds the 24 | position of a peak 25 | :param samples: 26 | :param index: 27 | :param mode: 28 | :return: 29 | """ 30 | assert mode in {"none", "parabolic", "gaussian"} 31 | if mode == "parabolic": 32 | return parabolic_interpolation(samples, index) 33 | elif mode == "gaussian": 34 | return gaussian_interpolation(samples, index) 35 | return index 36 | 37 | 38 | def parabolic_interpolation(data: List[float], local_max_index: int) -> float: 39 | assert 0 <= local_max_index <= len(data)-1 40 | i_max = local_max_index 41 | if i_max == 0 or i_max == len(data)-1: 42 | return data[i_max] 43 | v_max = data[i_max] 44 | v_left = data[local_max_index - 1] 45 | v_right = data[local_max_index + 1] 46 | return i_max + (v_right - v_left)/(2*(2*v_max - v_right - v_left)) 47 | 48 | 49 | def gaussian_interpolation(data: List[float], local_max_index: int) -> float: 50 | """ 51 | Like parabolic_interpolation but the natural logarithm of the values is used for interpolation 52 | :param data: 53 | :param local_max_index: 54 | :return: 55 | """ 56 | neigbours_ln = [log(v) for v in data[local_max_index-1:local_max_index+2]] 57 | return local_max_index + parabolic_interpolation(neigbours_ln, 1) 58 | 59 | 60 | def measure_main_frequency_zero_crossing(data: List[float], sampling_rate: float, calc_offset: bool = False) \ 61 | -> Optional[float]: 62 | assert sampling_rate > 0 63 | assert not calc_offset, "Offset calculation is not supported yet" 64 | 65 | data_dc = np.mean(data) 66 | data_ac = [x - data_dc for x in data] # remove dc part of signal 67 | 68 | # data_ac[i - 1] * data_ac[i] < 0 only if one sample point is negative and the other one is positive 69 | zero_crossings = [i for i in range(1, len(data_ac)) if data_ac[i - 1] * data_ac[i] < 0.0] 70 | 71 | if len(zero_crossings) < 2: 72 | return None 73 | 74 | def abs_interpolate(a, b): 75 | return abs(a) / (abs(a) + abs(b)) 76 | 77 | start = zero_crossings[0] 78 | end = zero_crossings[-1] 79 | # interpolate at what position the signal crosses the zero/avg 80 | start = start - abs_interpolate(data_ac[start], data_ac[start - 1]) 81 | end = end - abs_interpolate(data_ac[end], data_ac[end - 1]) 82 | 83 | half_wave_length_avg = (end - start) / (len(zero_crossings) - 1) / sampling_rate 84 | frequency = 1.0 / (2 * half_wave_length_avg) 85 | 86 | # delta = (end - start) * (1.0 / sampling_rate) 87 | # periods = (len(zero_crossings) - 1) / 2 88 | # frequency = 1.0 / (delta / periods) 89 | return frequency 90 | 91 | 92 | def measure_main_frequency_fft(samples: List[float], sampling_rate: float, mode: str = "parabolic") -> float: 93 | fourier = np.fft.rfft(samples * np.blackman(len(samples))) 94 | # blackman is better for main frequency estimation using parabolic or Gaussian interpolation 95 | # according to FFT_resol_note.pdf (IMPROVING FFT FREQUENCY MEASUREMENT RESOLUTION BY PARABOLIC 96 | # AND GAUSSIAN INTERPOLATION) 97 | 98 | fourier_amplitude = np.absolute(fourier) 99 | fourier_frequency = np.fft.rfftfreq(n=len(samples), d=1.0 / sampling_rate) 100 | fourier_frequency_step_width = fourier_frequency[1] 101 | # get the highest value (+ index of that) 102 | max_index, _ = max(enumerate(fourier_amplitude), key=lambda v: v[1]) 103 | return interpolate(fourier_amplitude, max_index, mode) * fourier_frequency_step_width 104 | 105 | 106 | # https://de.wikipedia.org/wiki/Autokorrelation#Finden_von_Signalperioden 107 | # https://stackoverflow.com/questions/13439718/how-to-interpret-numpy-correlate-and-numpy-corrcoef-values/37886856#37886856 108 | def measure_main_frequency_autocorrelate(samples: List[float], sampling_rate: float, mode: str = "parabolic") -> float: 109 | assert mode in {"max", "parabolic", "gaussian"} 110 | auto = np.correlate(samples, samples, mode="full") 111 | auto = auto[round(len(auto) / 2):] 112 | 113 | def find_local_peak(data: List[float], from_pos: int): 114 | for i in range(from_pos+1, len(data)-1): 115 | l, m, r = data[i-1:i+2] 116 | if l < m > r or l > m < r: 117 | return i 118 | return None 119 | 120 | first_min_index = find_local_peak(auto, 0) 121 | if first_min_index is None: 122 | return -1 123 | second_max_index = find_local_peak(auto, first_min_index) 124 | if second_max_index is None: 125 | return -1 126 | return sampling_rate / interpolate(auto, second_max_index, mode) 127 | 128 | 129 | # for phase angles between 0 and 180 degrees 130 | # https://dsp.stackexchange.com/questions/8673/best-method-to-extract-phase-shift-between-2-sinosoids-from-data-provided/26012#26012 131 | def measure_offset_signum(samples_a: List[float], samples_b: List[float]) -> float: 132 | signum_a = np.sign(samples_a - np.mean(samples_a)) 133 | signum_b = np.sign(samples_b - np.mean(samples_b)) 134 | return 90 - 90*(np.mean(signum_a*signum_b)) # offset in degrees [0-180] 135 | 136 | 137 | def measure_offset_correlate(samples_a: List[float], samples_b: List[float], 138 | sampling_rate: float, mode: str = "parabolic") -> float: 139 | auto = np.correlate(samples_a, samples_b, mode="full") 140 | best_offset = np.argmax(auto) 141 | return interpolate(auto, best_offset, mode) * (1/sampling_rate) # offset in sec 142 | 143 | 144 | # https://stackoverflow.com/questions/27545171/identifying-phase-shift-between-signals/27546385#27546385 145 | def measure_offset_fft(samples_a: List[float], 146 | samples_b: List[float], 147 | mode: str = "parabolic") -> float: 148 | assert mode in {"max", "parabolic", "gaussian"} 149 | fourier_a, fourier_b = np.fft.rfft(samples_a), np.fft.rfft(samples_b) 150 | max_index, _ = max(enumerate(np.absolute(fourier_a)), key=lambda v: v[1]) 151 | return np.angle(fourier_a[max_index] / fourier_b[max_index], deg=True) # offset in degrees [-180, 180] 152 | 153 | 154 | def calc_power(voltage_data: List[float], 155 | ampere_data: List[float])\ 156 | -> Tuple[float, float, float]: 157 | assert len(voltage_data) == len(ampere_data) > 0 158 | # P = 0 # active power/real power (Wirkleistung) 159 | # Q = 0 # reactive power (Blindleistung) 160 | # S = 0 # complex power/apparent power (Scheinleistung) 161 | 162 | # https://electronics.stackexchange.com/questions/199395/how-to-calculate-instantaneous-active-power-from-sampled-values-of-voltage-and-c/199401#199401 163 | instantaneous_power = [v*a for v, a in zip(voltage_data, ampere_data)] 164 | 165 | P = np.mean(instantaneous_power) 166 | S = rms(voltage_data) * rms(ampere_data) 167 | Q = sqrt(S**2 - P**2) 168 | # power_factor = P / S 169 | return P, Q, S 170 | 171 | 172 | # TODO name is not adequate 173 | # WARNING: Experimental 174 | def measure_main_frequencies_fft(samples: List[float], 175 | sampling_rate: float, 176 | freqeuency_search_count: int = 9, 177 | frqeuency_distinction_range: float = 2, # in Hz 178 | mode: str = "parabolic")\ 179 | -> List[Tuple[float, float]]: 180 | fourier = np.fft.rfft(samples * np.blackman(len(samples))) 181 | # blackman is better for main frequency estimation using parabolic or Gaussian interpolation 182 | # according to FFT_resol_note.pdf (IMPROVING FFT FREQUENCY MEASUREMENT RESOLUTION BY PARABOLIC 183 | # AND GAUSSIAN INTERPOLATION) 184 | 185 | fourier_amplitude = np.absolute(fourier) 186 | fourier_frequency = np.fft.rfftfreq(n=len(samples), d=1.0 / sampling_rate) 187 | fourier_frequency_step_width = fourier_frequency[1] 188 | 189 | # import matplotlib.pyplot as plt 190 | # plt.plot(fourier_frequency, fourier_amplitude) 191 | # plt.grid() 192 | # plt.show() 193 | 194 | frequencies = [] 195 | for harmonic_count in range(freqeuency_search_count): 196 | # get the highest value (+ index of that) 197 | max_index, max_value = max(enumerate(fourier_amplitude), key=lambda v: v[1]) 198 | max_index_interpolated = interpolate(fourier_amplitude, max_index, mode) 199 | max_value_interpolated = max_value # TODO interpolate 200 | frequencies.append((max_index_interpolated * fourier_frequency_step_width, max_value_interpolated)) 201 | 202 | # "remove" that frequency from the FFT 203 | half_fdr_as_index_size = (frqeuency_distinction_range/fourier_frequency_step_width)/2 204 | frequency_range_left = max(0, int(max_index_interpolated - half_fdr_as_index_size)) 205 | frequency_range_right = min(len(fourier_amplitude)-1, int(max_index_interpolated + half_fdr_as_index_size)) 206 | fourier_amplitude[frequency_range_left:frequency_range_right] = [0] * (frequency_range_right - frequency_range_left) 207 | print("fft step width", fourier_frequency_step_width) 208 | print("frequency_range_left", frequency_range_left) 209 | print("frequency_range_right", frequency_range_right) 210 | 211 | return frequencies 212 | 213 | # TODO name is not adequate 214 | # WARNING: Experimental 215 | # def measure_harmonics_fft(samples: List[float], sampling_rate: float, 216 | # harmonics_search_count: int = 9, mode: str = "parabolic")\ 217 | # -> List[Tuple[float, float]]: 218 | # fourier = np.fft.rfft(samples * np.blackman(len(samples))) 219 | # # blackman is better for main frequency estimation using parabolic or Gaussian interpolation 220 | # # according to FFT_resol_note.pdf (IMPROVING FFT FREQUENCY MEASUREMENT RESOLUTION BY PARABOLIC 221 | # # AND GAUSSIAN INTERPOLATION) 222 | # 223 | # fourier_amplitude = np.absolute(fourier) 224 | # fourier_frequency = np.fft.rfftfreq(n=len(samples), d=1.0 / sampling_rate) 225 | # fourier_frequency_step_width = fourier_frequency[1] 226 | # 227 | # import matplotlib.pyplot as plt 228 | # plt.plot(fourier_frequency, fourier_amplitude) 229 | # plt.grid() 230 | # plt.show() 231 | # 232 | # harmonics = [] 233 | # index_of_harmonic_0 = 0 234 | # previous_max_index = 0 235 | # for harmonic_count in range(harmonics_search_count): 236 | # # get the highest value (+ index of that) 237 | # search_start_index = index_of_harmonic_0 * harmonic_count 238 | # max_index, max_value = max(enumerate(fourier_amplitude[search_start_index:]), key=lambda v: v[1]) 239 | # max_index_interpolated = interpolate(fourier_amplitude, max_index, mode) 240 | # max_value_interpolated = max_value # TODO interpolate 241 | # harmonics.append(max_index_interpolated * fourier_frequency_step_width, max_value_interpolated) 242 | # previous_max_index = max_index 243 | # if index_of_harmonic_0 == 0: 244 | # index_of_harmonic_0 = max_index_interpolated 245 | # 246 | # return harmonics 247 | 248 | -------------------------------------------------------------------------------- /zoscf_log.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example zero offset shift compensation function file 3 | """ 4 | import math 5 | 6 | # extracted from ch1-ch8_0V_24C-25.5C.csv.xz.s440.csv 7 | # a , b , c 8 | __zos_data = [ 9 | [16.920415552899, -0.000381453840, 2012.6568], 10 | [21.070626029182, -0.000514977995, 1992.0672], 11 | [17.472230375023, -0.000398079698, 2008.5214], 12 | [19.685232121568, -0.000442328011, 2001.6491], 13 | [15.967868298571, -0.000336029388, 2010.2883], 14 | [16.050962032621, -0.000334699999, 2009.1168], 15 | [16.526705006909, -0.000337116893, 2002.3118], 16 | [18.756603380710, -0.000434281825, 2011.1874], 17 | ] 18 | __zero_offset_start = [2037.0, 2023.0, 2032.0, 2030.0, 2034.0, 2033.0, 2026.0, 2039.0] 19 | 20 | 21 | def calc_zos(ch: int, vscale: float, dtime: float) -> float: 22 | assert vscale == 1.0 23 | 24 | def exp(x, a, b, c): 25 | return a * math.e**(b * x) + c 26 | 27 | return __zero_offset_start[ch] - exp(dtime, *__zos_data[ch]) 28 | 29 | 30 | if __name__ == '__main__': 31 | print(*([t, calc_zos(0, 1.0, t)] for t in range(0, 120, 5)), sep="\n") 32 | --------------------------------------------------------------------------------