├── .gitignore
├── Python Offline Utilities IFU.pdf
├── README.md
├── brpylib
│   ├── __init__.py
│   ├── brMiscFxns.py
│   └── brpylib.py
├── examples
│   ├── extract_continuous_data.ipynb
│   └── save_subset_nsx.py
├── pyproject.toml
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | __pycache__
3 | .vscode
4 | *.swp
5 | .DS_Store
6 | *.egg-info/
7 | .idea
8 |
--------------------------------------------------------------------------------
/Python Offline Utilities IFU.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BlackrockNeurotech/Python-Utilities/fa75aa671680306788e10d3d8dd625f9da4ea4f6/Python Offline Utilities IFU.pdf
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Python-Utilities
2 | A collection of scripts for loading and manipulating Blackrock Microsystems datafiles.
3 |
4 | See the included instructions for use (Python Offline Utilities IFU.pdf) for how to install and use these utilities.
5 |
--------------------------------------------------------------------------------
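A minimal usage sketch (the file name is hypothetical; the returned keys follow the NsxFile.getdata() docstring in brpylib/brpylib.py):

    from brpylib import NsxFile

    nsx_file = NsxFile("sample_data.ns5")         # opens the file and parses headers
    cont_data = nsx_file.getdata(elec_ids="all")  # dict with "data", "data_headers", "samp_per_s", ...
    print(cont_data["samp_per_s"])
    nsx_file.close()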
/brpylib/__init__.py:
--------------------------------------------------------------------------------
1 | from .brpylib import NevFile, NsxFile, brpylib_ver
2 |
--------------------------------------------------------------------------------
/brpylib/brMiscFxns.py:
--------------------------------------------------------------------------------
1 | """
2 | Random functions that may be useful elsewhere (or necessary)
3 | current version: 1.2.0 --- 08/04/2016
4 |
5 | @author: Mitch Frankel - Blackrock Microsystems
6 |
7 | Version History:
8 | v1.0.0 - 07/05/2016 - initial release
9 | v1.1.0 - 07/12/2016 - minor editing changes to print statements and addition of version control
10 | v1.2.0 - 08/04/2016 - minor modifications to allow use of Python 2.6+
11 | """
12 | from os import getcwd, path
13 |
14 | try:
15 | from qtpy.QtWidgets import QApplication, QFileDialog
16 |
17 | HAS_QT = True
18 | except ImportError:  # ModuleNotFoundError is a subclass of ImportError
19 | HAS_QT = False
20 |
21 | # Version control
22 | brmiscfxns_ver = "1.2.0"
23 |
24 | # Patch for use with Python 2.6+
25 | try:
26 | input = raw_input
27 | except NameError:
28 | pass
29 |
30 |
31 | def openfilecheck(open_mode, file_name="", file_ext="", file_type=""):
32 | """
33 | :param open_mode: {str} method to open the file (e.g., 'rb' for binary read only)
34 | :param file_name: [optional] {str} full path of file to open
35 | :param file_ext: [optional] {str} file extension (e.g., '.nev')
36 | :param file_type: [optional] {str} file type for use when browsing for file (e.g., 'Blackrock NEV Files')
37 | :return: {file} opened file
38 | """
39 |
40 | while True:
41 | if not file_name: # no file name passed
42 | if not HAS_QT:
43 | raise ModuleNotFoundError(
44 | "Qt required for file dialog. Install PySide + qtpy or provide file_name."
45 | )
46 |
47 | # Ask user to specify a file path or browse
48 | file_name = input(
49 | "Enter complete " + file_ext + " file path or hit enter to browse: "
50 | )
51 |
52 | if not file_name:
53 | if "app" not in locals():
54 | app = QApplication([])
55 | if not file_ext:
56 | file_type = "All Files"
57 | file_name = QFileDialog.getOpenFileName(
58 | QFileDialog(),
59 | "Select File",
60 | getcwd(),
61 | file_type + " (*" + file_ext + ")",
62 | )
63 | file_name = file_name[0]
64 |
65 | # Ensure the file exists (mainly needed when the user typed the path)
66 | if path.isfile(file_name):
67 | # Ensure given file matches file_ext
68 | if file_ext:
69 | _, fext = path.splitext(file_name)
70 |
71 | # check for * in extension
72 | if file_ext[-1] == "*":
73 | test_extension = file_ext[:-1]
74 | else:
75 | test_extension = file_ext
76 |
77 | if fext[0 : len(test_extension)] != test_extension:
78 | file_name = ""
79 | print(
80 | "\n*** File given is not a "
81 | + file_ext
82 | + " file, try again ***\n"
83 | )
84 | continue
85 | break
86 | else:
87 | file_name = ""
88 | print("\n*** File given does exist, try again ***\n")
89 |
90 | print("\n" + file_name.split("/")[-1] + " opened")
91 | return open(file_name, open_mode)
92 |
93 |
94 | def checkequal(iterator):
95 | try:
96 | iterator = iter(iterator)
97 | first = next(iterator)
98 | return all(first == rest for rest in iterator)
99 | except StopIteration:
100 | return True
101 |
--------------------------------------------------------------------------------
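A short sketch of calling openfilecheck() directly (the path is hypothetical). With an empty file_name it prompts on the console and, if qtpy plus a Qt binding are installed, offers a file-browse dialog instead:

    from brpylib.brMiscFxns import openfilecheck

    f = openfilecheck(
        "rb",
        file_name="recording.nev",  # hypothetical path; leave empty to prompt/browse
        file_ext=".nev",
        file_type="Blackrock NEV Files",
    )
    f.close()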
/brpylib/brpylib.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Collection of classes used for reading headers and data from Blackrock files
4 | current version: 2.0.1 --- 11/12/2021
5 |
6 | @author: Mitch Frankel - Blackrock Microsystems
7 | Stephen Hou - v1.4.0 edits
8 | David Kluger - v2.0.0 overhaul
9 |
10 | Version History:
11 | v1.0.0 - 07/05/2016 - initial release - requires brMiscFxns v1.0.0
12 | v1.1.0 - 07/08/2016 - inclusion of NsxFile.savesubsetnsx() for saving subset of Nsx data to disk
13 | v1.1.1 - 07/09/2016 - update to NsxFile.savesubsetnsx() with an option to (not) overwrite subset files if they already exist
14 | bug fixes in NsxFile class as reported from beta user
15 | v1.2.0 - 07/12/2016 - bug fixes in NsxFile.savesubsetnsx()
16 | added version control and checking for brMiscFxns
17 | requires brMiscFxns v1.1.0
18 | v1.3.0 - 07/22/2016 - added 'samp_per_s' to NsxFile.getdata() output
19 | added close() method to NsxFile and NevFile objects
20 | NsxFile.getdata() now pre-allocates output['data'] as zeros - speed and safety
21 | v1.3.1 - 08/02/2016 - bug fixes to NsxFile.getdata() for usability with Python 2.7 as reported from beta user
22 | patch for use with multiple NSP sync (overwriting of initial null data from initial data packet)
23 | __future__ import for use with Python 2.7 (division)
24 | minor modifications to allow use of Python 2.6+
25 | v1.3.2 - 08/12/2016 - bug fixes to NsXFile.getdata()
26 | v1.4.0 - 06/22/2017 - inclusion of wave_read parameter to NevFile.getdata() for including/excluding waveform data
27 | v2.0.0 - 04/27/2021 - numpy-based architecture rebuild of NevFile.getdata()
28 | v2.0.1 - 11/12/2021 - fixed indexing error in NevFile.getdata()
29 | Added numpy architecture to NsxFile.getdata()
30 | v2.0.2 - 03/21/2023 - added logic to NsxFile.getdata() for files where PTP timestamps are applied to every continuous sample
31 | v2.0.3 - 05/11/2023 - Fixed bug with memmap and file.seek
32 | """
33 |
34 |
35 | from __future__ import division # for those using Python 2.6+
36 |
37 | from collections import namedtuple
38 | from datetime import datetime
39 | from math import ceil
40 | from os import path as ospath
41 | from struct import calcsize, pack, unpack, unpack_from
42 |
43 | import numpy as np
44 |
45 | from .brMiscFxns import brmiscfxns_ver, openfilecheck
46 |
47 | # Version control set/check
48 | brpylib_ver = "2.0.3"
49 | brmiscfxns_ver_req = "1.2.0"
50 | if [int(x) for x in brmiscfxns_ver.split(".")] < [int(x) for x in brmiscfxns_ver_req.split(".")]:  # compare numerically, not as strings
51 | raise Exception(
52 | "brpylib requires brMiscFxns "
53 | + brmiscfxns_ver_req
54 | + " or higher, please use latest version"
55 | )
56 |
57 | # Patch for use with Python 2.6+
58 | try:
59 | input = raw_input
60 | except NameError:
61 | pass
62 |
63 | # Define global variables to remove magic numbers
64 | # <editor-fold desc="...">
65 | WARNING_SLEEP_TIME = 5
66 | DATA_PAGING_SIZE = 1024**3
67 | DATA_FILE_SIZE_MIN = 1024**2 * 10
68 | STRING_TERMINUS = "\x00"
69 | UNDEFINED = 0
70 | ELEC_ID_DEF = "all"
71 | START_TIME_DEF = 0
72 | DATA_TIME_DEF = "all"
73 | DOWNSAMPLE_DEF = 1
74 | START_OFFSET_MIN = 0
75 | STOP_OFFSET_MIN = 0
76 |
77 | UV_PER_BIT_21 = 0.25
78 | WAVEFORM_SAMPLES_21 = 48
79 | NSX_BASIC_HEADER_BYTES_22 = 314
80 | NSX_EXT_HEADER_BYTES_22 = 66
81 | DATA_BYTE_SIZE = 2
82 | TIMESTAMP_NULL_21 = 0
83 | MAX_SAMP_PER_S = 30000
84 |
85 | NO_FILTER = 0
86 | BUTTER_FILTER = 1
87 | SERIAL_MODE = 0
88 |
89 | RB2D_MARKER = 1
90 | RB2D_BLOB = 2
91 | RB3D_MARKER = 3
92 | BOUNDARY_2D = 4
93 | MARKER_SIZE = 5
94 |
95 | DIGITAL_PACKET_ID = 0
96 | NEURAL_PACKET_ID_MIN = 1
97 | NEURAL_PACKET_ID_MAX = 16384
98 | COMMENT_PACKET_ID = 65535
99 | VIDEO_SYNC_PACKET_ID = 65534
100 | TRACKING_PACKET_ID = 65533
101 | BUTTON_PACKET_ID = 65532
102 | CONFIGURATION_PACKET_ID = 65531
103 |
104 | PARALLEL_REASON = 1
105 | PERIODIC_REASON = 64
106 | SERIAL_REASON = 129
107 | LOWER_BYTE_MASK = 255
108 | FIRST_BIT_MASK = 1
109 | SECOND_BIT_MASK = 2
110 |
111 | CLASSIFIER_MIN = 1
112 | CLASSIFIER_MAX = 16
113 | CLASSIFIER_NOISE = 255
114 |
115 | CHARSET_ANSI = 0
116 | CHARSET_UTF = 1
117 | CHARSET_ROI = 255
118 |
119 | COMM_RGBA = 0
120 | COMM_TIME = 1
121 |
122 | BUTTON_PRESS = 1
123 | BUTTON_RESET = 2
124 |
125 | CHG_NORMAL = 0
126 | CHG_CRITICAL = 1
127 |
128 | ENTER_EVENT = 1
129 | EXIT_EVENT = 2
130 | # </editor-fold>
131 |
132 | # Define a named tuple that has information about header/packet fields
133 | FieldDef = namedtuple("FieldDef", ["name", "formatStr", "formatFnc"])
134 |
135 |
136 | # <editor-fold desc="...">
137 | def processheaders(curr_file, packet_fields):
138 | """
139 | :param curr_file: {file} the current BR datafile to be processed
140 | :param packet_fields : {named tuple} the specific binary fields for the given header
141 | :return: a fully unpacked and formatted tuple set of header information
142 |
143 | Read a packet from a binary data file and return a list of fields
144 | The amount and format of data read will be specified by the
145 | packet_fields container
146 | """
147 |
148 | # This is a lot in one line. First I pull out all the format strings from
149 | # the basic_header_fields named tuple, then concatenate them into a string
150 | # with '<' at the front (for little endian format)
151 | packet_format_str = "<" + "".join([fmt for name, fmt, fun in packet_fields])
152 |
153 | # Calculate how many bytes to read based on the format strings of the header fields
154 | bytes_in_packet = calcsize(packet_format_str)
155 | packet_binary = curr_file.read(bytes_in_packet)
156 |
157 | # unpack the binary data from the header based on the format strings of each field.
158 | # This returns a list of data, but it's not always correctly formatted (eg, FileSpec
159 | # is read as ints 2 and 3 but I want it as '2.3'
160 | packet_unpacked = unpack(packet_format_str, packet_binary)
161 |
162 | # Create a iterator from the data list. This allows a formatting function
163 | # to use more than one item from the list if needed, and the next formatting
164 | # function can pick up on the correct item in the list
165 | data_iter = iter(packet_unpacked)
166 |
167 | # create an empty dictionary from the name field of the packet_fields.
168 | # The loop below will fill in the values with formatted data by calling
169 | # each field's formatting function
170 | packet_formatted = dict.fromkeys([name for name, fmt, fun in packet_fields])
171 | for name, fmt, fun in packet_fields:
172 | packet_formatted[name] = fun(data_iter)
173 |
174 | return packet_formatted
175 |
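| # For example, FileSpec is declared as "2B": unpack() yields two unsigned
| # chars (e.g., 2 and 3), and format_filespec() below pulls both off the
| # shared iterator to build the string "2.3" - which is why each formatFnc
| # receives the iterator itself rather than a single pre-split value.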
176 |
177 | def format_filespec(header_list):
178 | return str(next(header_list)) + "." + str(next(header_list)) # eg 2.3
179 |
180 |
181 | def format_timeorigin(header_list):
182 | year = next(header_list)
183 | month = next(header_list)
184 | _ = next(header_list)  # day-of-week field, unused here
185 | day = next(header_list)
186 | hour = next(header_list)
187 | minute = next(header_list)
188 | second = next(header_list)
189 | millisecond = next(header_list)
190 | return datetime(year, month, day, hour, minute, second, millisecond * 1000)
191 |
192 |
193 | def format_stripstring(header_list):
194 | string = bytes.decode(next(header_list), "latin-1")
195 | return string.split(STRING_TERMINUS, 1)[0]
196 |
197 |
198 | def format_none(header_list):
199 | return next(header_list)
200 |
201 |
202 | def format_freq(header_list):
203 | return str(float(next(header_list)) / 1000) + " Hz"
204 |
205 |
206 | def format_filter(header_list):
207 | filter_type = next(header_list)
208 | if filter_type == NO_FILTER:
209 | return "none"
210 | elif filter_type == BUTTER_FILTER:
211 | return "butterworth"
212 |
213 |
214 | def format_charstring(header_list):
215 | return int(next(header_list))
216 |
217 |
218 | def format_digconfig(header_list):
219 | config = next(header_list) & FIRST_BIT_MASK
220 | if config:
221 | return "active"
222 | else:
223 | return "ignored"
224 |
225 |
226 | def format_anaconfig(header_list):
227 | config = next(header_list)
228 | if config & FIRST_BIT_MASK:
229 | return "low_to_high"
230 | if config & SECOND_BIT_MASK:
231 | return "high_to_low"
232 | else:
233 | return "none"
234 |
235 |
236 | def format_digmode(header_list):
237 | dig_mode = next(header_list)
238 | if dig_mode == SERIAL_MODE:
239 | return "serial"
240 | else:
241 | return "parallel"
242 |
243 |
244 | def format_trackobjtype(header_list):
245 | trackobj_type = next(header_list)
246 | if trackobj_type == UNDEFINED:
247 | return "undefined"
248 | elif trackobj_type == RB2D_MARKER:
249 | return "2D RB markers"
250 | elif trackobj_type == RB2D_BLOB:
251 | return "2D RB blob"
252 | elif trackobj_type == RB3D_MARKER:
253 | return "3D RB markers"
254 | elif trackobj_type == BOUNDARY_2D:
255 | return "2D boundary"
256 | elif trackobj_type == MARKER_SIZE:
257 | return "marker size"
258 | else:
259 | return "error"
260 |
261 |
262 | def getdigfactor(ext_headers, idx):
263 | max_analog = ext_headers[idx]["MaxAnalogValue"]
264 | min_analog = ext_headers[idx]["MinAnalogValue"]
265 | max_digital = ext_headers[idx]["MaxDigitalValue"]
266 | min_digital = ext_headers[idx]["MinDigitalValue"]
267 | return float(max_analog - min_analog) / float(max_digital - min_digital)
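| # e.g., with hypothetical limits of -8191..8191 uV analog over -8191..8191
| # digital counts, the factor is (8191 - -8191) / (8191 - -8191) = 1.0 uV/bit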
268 |
269 |
270 | # </editor-fold>
271 |
272 |
273 | # <editor-fold desc="...">
274 | nev_header_dict = {
275 | "basic": [
276 | FieldDef("FileTypeID", "8s", format_stripstring), # 8 bytes - 8 char array
277 | FieldDef("FileSpec", "2B", format_filespec), # 2 bytes - 2 unsigned char
278 | FieldDef("AddFlags", "H", format_none), # 2 bytes - uint16
279 | FieldDef("BytesInHeader", "I", format_none), # 4 bytes - uint32
280 | FieldDef("BytesInDataPackets", "I", format_none), # 4 bytes - uint32
281 | FieldDef("TimeStampResolution", "I", format_none), # 4 bytes - uint32
282 | FieldDef("SampleTimeResolution", "I", format_none), # 4 bytes - uint32
283 | FieldDef("TimeOrigin", "8H", format_timeorigin), # 16 bytes - 8 x uint16
284 | FieldDef(
285 | "CreatingApplication", "32s", format_stripstring
286 | ), # 32 bytes - 32 char array
287 | FieldDef("Comment", "256s", format_stripstring), # 256 bytes - 256 char array
288 | FieldDef("NumExtendedHeaders", "I", format_none),
289 | ], # 4 bytes - uint32
290 | "ARRAYNME": FieldDef(
291 | "ArrayName", "24s", format_stripstring
292 | ), # 24 bytes - 24 char array
293 | "ECOMMENT": FieldDef(
294 | "ExtraComment", "24s", format_stripstring
295 | ), # 24 bytes - 24 char array
296 | "CCOMMENT": FieldDef(
297 | "ContComment", "24s", format_stripstring
298 | ), # 24 bytes - 24 char array
299 | "MAPFILE": FieldDef(
300 | "MapFile", "24s", format_stripstring
301 | ), # 24 bytes - 24 char array
302 | "NEUEVWAV": [
303 | FieldDef("ElectrodeID", "H", format_none), # 2 bytes - uint16
304 | FieldDef(
305 | "PhysicalConnector", "B", format_charstring
306 | ), # 1 byte - 1 unsigned char
307 | FieldDef("ConnectorPin", "B", format_charstring), # 1 byte - 1 unsigned char
308 | FieldDef("DigitizationFactor", "H", format_none), # 2 bytes - uint16
309 | FieldDef("EnergyThreshold", "H", format_none), # 2 bytes - uint16
310 | FieldDef("HighThreshold", "h", format_none), # 2 bytes - int16
311 | FieldDef("LowThreshold", "h", format_none), # 2 bytes - int16
312 | FieldDef(
313 | "NumSortedUnits", "B", format_charstring
314 | ), # 1 byte - 1 unsigned char
315 | FieldDef(
316 | "BytesPerWaveform", "B", format_charstring
317 | ), # 1 byte - 1 unsigned char
318 | FieldDef("SpikeWidthSamples", "H", format_none), # 2 bytes - uint16
319 | FieldDef("EmptyBytes", "8s", format_none),
320 | ], # 8 bytes - empty
321 | "NEUEVLBL": [
322 | FieldDef("ElectrodeID", "H", format_none), # 2 bytes - uint16
323 | FieldDef("Label", "16s", format_stripstring), # 16 bytes - 16 char array
324 | FieldDef("EmptyBytes", "6s", format_none),
325 | ], # 6 bytes - empty
326 | "NEUEVFLT": [
327 | FieldDef("ElectrodeID", "H", format_none), # 2 bytes - uint16
328 | FieldDef("HighFreqCorner", "I", format_freq), # 4 bytes - uint32
329 | FieldDef("HighFreqOrder", "I", format_none), # 4 bytes - uint32
330 | FieldDef("HighFreqType", "H", format_filter), # 2 bytes - uint16
331 | FieldDef("LowFreqCorner", "I", format_freq), # 4 bytes - uint32
332 | FieldDef("LowFreqOrder", "I", format_none), # 4 bytes - uint32
333 | FieldDef("LowFreqType", "H", format_filter), # 2 bytes - uint16
334 | FieldDef("EmptyBytes", "2s", format_none),
335 | ], # 2 bytes - empty
336 | "DIGLABEL": [
337 | FieldDef("Label", "16s", format_stripstring), # 16 bytes - 16 char array
338 | FieldDef("Mode", "?", format_digmode), # 1 byte - boolean
339 | FieldDef("EmptyBytes", "7s", format_none),
340 | ], # 7 bytes - empty
341 | "NSASEXEV": [
342 | FieldDef("Frequency", "H", format_none), # 2 bytes - uint16
343 | FieldDef(
344 | "DigitalInputConfig", "B", format_digconfig
345 | ), # 1 byte - 1 unsigned char
346 | FieldDef(
347 | "AnalogCh1Config", "B", format_anaconfig
348 | ), # 1 byte - 1 unsigned char
349 | FieldDef("AnalogCh1DetectVal", "h", format_none), # 2 bytes - int16
350 | FieldDef(
351 | "AnalogCh2Config", "B", format_anaconfig
352 | ), # 1 byte - 1 unsigned char
353 | FieldDef("AnalogCh2DetectVal", "h", format_none), # 2 bytes - int16
354 | FieldDef(
355 | "AnalogCh3Config", "B", format_anaconfig
356 | ), # 1 byte - 1 unsigned char
357 | FieldDef("AnalogCh3DetectVal", "h", format_none), # 2 bytes - int16
358 | FieldDef(
359 | "AnalogCh4Config", "B", format_anaconfig
360 | ), # 1 byte - 1 unsigned char
361 | FieldDef("AnalogCh4DetectVal", "h", format_none), # 2 bytes - int16
362 | FieldDef(
363 | "AnalogCh5Config", "B", format_anaconfig
364 | ), # 1 byte - 1 unsigned char
365 | FieldDef("AnalogCh5DetectVal", "h", format_none), # 2 bytes - int16
366 | FieldDef("EmptyBytes", "6s", format_none),
367 | ], # 6 bytes - empty
368 | "VIDEOSYN": [
369 | FieldDef("VideoSourceID", "H", format_none), # 2 bytes - uint16
370 | FieldDef("VideoSource", "16s", format_stripstring), # 16 bytes - 16 char array
371 | FieldDef("FrameRate", "f", format_none), # 4 bytes - single float
372 | FieldDef("EmptyBytes", "2s", format_none),
373 | ], # 2 bytes - empty
374 | "TRACKOBJ": [
375 | FieldDef("TrackableType", "H", format_trackobjtype), # 2 bytes - uint16
376 | FieldDef("TrackableID", "I", format_none), # 4 bytes - uint32
377 | # FieldDef('PointCount', 'H', format_none), # 2 bytes - uint16
378 | FieldDef("VideoSource", "16s", format_stripstring), # 16 bytes - 16 char array
379 | FieldDef("EmptyBytes", "2s", format_none),
380 | ], # 2 bytes - empty
381 | }
382 |
383 | nsx_header_dict = {
384 | "basic_21": [
385 | FieldDef("Label", "16s", format_stripstring), # 16 bytes - 16 char array
386 | FieldDef("Period", "I", format_none), # 4 bytes - uint32
387 | FieldDef("ChannelCount", "I", format_none),
388 | ], # 4 bytes - uint32
389 | "basic": [
390 | FieldDef("FileSpec", "2B", format_filespec), # 2 bytes - 2 unsigned char
391 | FieldDef("BytesInHeader", "I", format_none), # 4 bytes - uint32
392 | FieldDef("Label", "16s", format_stripstring), # 16 bytes - 16 char array
393 | FieldDef("Comment", "256s", format_stripstring), # 256 bytes - 256 char array
394 | FieldDef("Period", "I", format_none), # 4 bytes - uint32
395 | FieldDef("TimeStampResolution", "I", format_none), # 4 bytes - uint32
396 | FieldDef("TimeOrigin", "8H", format_timeorigin), # 16 bytes - 8 uint16
397 | FieldDef("ChannelCount", "I", format_none),
398 | ], # 4 bytes - uint32
399 | "extended": [
400 | FieldDef("Type", "2s", format_stripstring), # 2 bytes - 2 char array
401 | FieldDef("ElectrodeID", "H", format_none), # 2 bytes - uint16
402 | FieldDef(
403 | "ElectrodeLabel", "16s", format_stripstring
404 | ), # 16 bytes - 16 char array
405 | FieldDef("PhysicalConnector", "B", format_none), # 1 byte - uint8
406 | FieldDef("ConnectorPin", "B", format_none), # 1 byte - uint8
407 | FieldDef("MinDigitalValue", "h", format_none), # 2 bytes - int16
408 | FieldDef("MaxDigitalValue", "h", format_none), # 2 bytes - int16
409 | FieldDef("MinAnalogValue", "h", format_none), # 2 bytes - int16
410 | FieldDef("MaxAnalogValue", "h", format_none), # 2 bytes - int16
411 | FieldDef("Units", "16s", format_stripstring), # 16 bytes - 16 char array
412 | FieldDef("HighFreqCorner", "I", format_freq), # 4 bytes - uint32
413 | FieldDef("HighFreqOrder", "I", format_none), # 4 bytes - uint32
414 | FieldDef("HighFreqType", "H", format_filter), # 2 bytes - uint16
415 | FieldDef("LowFreqCorner", "I", format_freq), # 4 bytes - uint32
416 | FieldDef("LowFreqOrder", "I", format_none), # 4 bytes - uint32
417 | FieldDef("LowFreqType", "H", format_filter),
418 | ], # 2 bytes - uint16
419 | "data": [
420 | FieldDef("Header", "B", format_none), # 1 byte - uint8
421 | FieldDef("Timestamp", "I", format_none), # 4 bytes - uint32
422 | FieldDef("NumDataPoints", "I", format_none),
423 | ], # 4 bytes - uint32
424 | }
425 | # </editor-fold>
426 |
427 |
428 | # <editor-fold desc="...">
429 | def check_elecid(elec_ids):
430 | if type(elec_ids) is str and elec_ids != ELEC_ID_DEF:
431 | print(
432 | "\n*** WARNING: Electrode IDs must be 'all', a single integer, or a list of integers."
433 | )
434 | print(" Setting elec_ids to 'all'")
435 | elec_ids = ELEC_ID_DEF
436 | if elec_ids != ELEC_ID_DEF and type(elec_ids) is not list:
437 | if type(elec_ids) == range:
438 | elec_ids = list(elec_ids)
439 | elif type(elec_ids) == int:
440 | elec_ids = [elec_ids]
441 | return elec_ids
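| # e.g., check_elecid(range(1, 5)) -> [1, 2, 3, 4]; check_elecid(7) -> [7];
| # any string other than 'all' triggers the warning and falls back to 'all'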
442 |
443 |
444 | def check_starttime(start_time_s):
445 | if not isinstance(start_time_s, (int, float)) or (
446 | isinstance(start_time_s, (int, float)) and start_time_s < START_TIME_DEF
447 | ):
448 | print("\n*** WARNING: Start time is not valid, setting start_time_s to 0")
449 | start_time_s = START_TIME_DEF
450 | return start_time_s
451 |
452 |
453 | def check_datatime(data_time_s):
454 | if (type(data_time_s) is str and data_time_s != DATA_TIME_DEF) or (
455 | isinstance(data_time_s, (int, float)) and data_time_s < 0
456 | ):
457 | print("\n*** WARNING: Data time is not valid, setting data_time_s to 'all'")
458 | data_time_s = DATA_TIME_DEF
459 | return data_time_s
460 |
461 |
462 | def check_downsample(downsample):
463 | if not isinstance(downsample, int) or downsample < DOWNSAMPLE_DEF:
464 | print(
465 | "\n*** WARNING: downsample must be an integer value greater than 0. "
466 | " Setting downsample to 1 (no downsampling)"
467 | )
468 | downsample = DOWNSAMPLE_DEF
469 | if downsample > 1:
470 | print(
471 | "\n*** WARNING: downsample will be deprecated in a future version."
472 | " Set downsample to 1 (default) to match future behavior."
473 | "\n*** WARNING: downsample does not perform anti-aliasing."
474 | )
475 | return downsample
476 |
477 |
478 | def check_dataelecid(elec_ids, all_elec_ids):
479 | unique_elec_ids = set(elec_ids)
480 | all_elec_ids = set(all_elec_ids)
481 |
482 | # if some electrodes asked for don't exist, reset list with those that do, or throw error and return
483 | if not unique_elec_ids.issubset(all_elec_ids):
484 | if not unique_elec_ids & all_elec_ids:
485 | print("\nNone of the elec_ids passed exist in the data, returning None")
486 | return None
487 | else:
488 | print(
489 | "\n*** WARNING: Channels "
490 | + str(sorted(list(unique_elec_ids - all_elec_ids)))
491 | + " do not exist in the data"
492 | )
493 | unique_elec_ids = unique_elec_ids & all_elec_ids
494 |
495 | return sorted(list(unique_elec_ids))
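| # e.g., check_dataelecid([2, 99, 1], [1, 2, 3]) warns that channel 99 does
| # not exist and returns [1, 2]; it returns None if no requested channel exists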
496 |
497 |
498 | def check_filesize(file_size):
499 | if file_size < DATA_FILE_SIZE_MIN:
500 | print("\n file_size must be larger than 10 Mb, setting file_size=10 Mb")
501 | return DATA_FILE_SIZE_MIN
502 | else:
503 | return int(file_size)
504 |
505 |
506 | # </editor-fold>
507 |
508 |
509 | class NevFile:
510 | """
511 | attributes and methods for all BR event data files. Initialization opens the file and extracts the
512 | basic header information.
513 | """
514 |
515 | def __init__(self, datafile=""):
516 | self.datafile = datafile
517 | self.basic_header = {}
518 | self.extended_headers = []
519 |
520 | # Run openfilecheck and open the file passed or allow user to browse to one
521 | self.datafile = openfilecheck(
522 | "rb",
523 | file_name=self.datafile,
524 | file_ext=".nev",
525 | file_type="Blackrock NEV Files",
526 | )
527 |
528 | # extract basic header information
529 | self.basic_header = processheaders(self.datafile, nev_header_dict["basic"])
530 |
531 | # Extract extended headers
532 | for i in range(self.basic_header["NumExtendedHeaders"]):
533 | self.extended_headers.append({})
534 | header_string = bytes.decode(
535 | unpack("<8s", self.datafile.read(8))[0], "latin-1"
536 | )
537 | self.extended_headers[i]["PacketID"] = header_string.split(
538 | STRING_TERMINUS, 1
539 | )[0]
540 | self.extended_headers[i].update(
541 | processheaders(
542 | self.datafile, nev_header_dict[self.extended_headers[i]["PacketID"]]
543 | )
544 | )
545 |
546 | # Must set this for file spec 2.1 and 2.2
547 | if (
548 | header_string == "NEUEVWAV"
549 | and float(self.basic_header["FileSpec"]) < 2.3
550 | ):
551 | self.extended_headers[i]["SpikeWidthSamples"] = WAVEFORM_SAMPLES_21
552 |
553 | def getdata(self, elec_ids="all", wave_read="read"):
554 | """
555 | This function is used to return a set of data from the NEV datafile.
556 |
557 | :param elec_ids: [optional] {list} User selection of elec_ids to extract specific spike waveforms (e.g., [13])
558 | :param wave_read: [optional] {str} 'read' or 'no_read' - whether to read waveform data or not
559 | :return: output: {Dictionary} with one or more of the following dictionaries (all include TimeStamps)
560 | dig_events: Reason, Data, [for file spec 2.2 and below, AnalogData and AnalogDataUnits]
561 | spike_events: Units='nV', ChannelID, NEUEVWAV_HeaderIndices, Classification, Waveforms
562 | comments: CharSet, Flag, Data, Comment
563 | video_sync_events: VideoFileNum, VideoFrameNum, VideoElapsedTime_ms, VideoSourceID
564 | tracking_events: ParentID, NodeID, NodeCount, TrackingPoints
565 | button_trigger_events: TriggerType
566 | configuration_events: ConfigChangeType
567 |
568 | Note: For digital and neural data - TimeStamps, Classification, and Data can be lists of lists when more
569 | than one digital type or spike event exists for a channel
570 | """
571 |
572 | # Initialize output dictionary and reset position in file (if read before, may not be here anymore)
573 | output = dict()
574 |
575 | # Safety checks
576 | elec_ids = check_elecid(elec_ids)
577 |
578 | ######
579 | # extract raw data
580 | self.datafile.seek(0, 2)
581 | lData = self.datafile.tell()
582 | nPackets = int(
583 | (lData - self.basic_header["BytesInHeader"])
584 | / self.basic_header["BytesInDataPackets"]
585 | )
586 | self.datafile.seek(self.basic_header["BytesInHeader"], 0)
587 | rawdata = self.datafile.read()
588 | # rawdataArray = np.reshape(np.fromstring(rawdata,'B'),(nPackets,self.basic_header['BytesInDataPackets']))
589 |
590 | # Find all timestamps and PacketIDs
591 | if self.basic_header["FileTypeID"] == "BREVENTS":
592 | tsBytes = 8
593 | ts = np.ndarray(
594 | (nPackets,),
595 | " 0:
626 | ChannelID = PacketID
627 | if type(elec_ids) is list:
628 | elecindices = [
629 | idx
630 | for idx, element in enumerate(ChannelID[neuralPackets])
631 | if element in elec_ids
632 | ]
633 | neuralPackets = [neuralPackets[index] for index in elecindices]
634 |
635 | spikeUnit = np.ndarray(
636 | (nPackets,),
637 | " 0:
666 | insertionReason = np.ndarray(
667 | (nPackets,),
668 | " 0:
691 | charSet = np.ndarray(
692 | (nPackets,),
693 | " 0:
711 | charSetList[ANSIPackets] = "ANSI"
712 | UTFPackets = [
713 | idx for idx, element in enumerate(charSet) if element == CHARSET_UTF
714 | ]
715 | if len(UTFPackets) > 0:
716 | charSetList[UTFPackets] = "UTF "
717 |
718 | # need to transfer comments from neuromotive. identify region of interest (ROI) events...
719 | ROIPackets = [
720 | idx for idx, element in enumerate(charSet) if element == CHARSET_ROI
721 | ]
722 |
723 | lcomment = self.basic_header["BytesInDataPackets"] - tsBytes - 10
724 | comments = np.chararray(
725 | (nPackets, lcomment),
726 | 1,
727 | False,
728 | rawdata,
729 | tsBytes + 8,
730 | (self.basic_header["BytesInDataPackets"], 1),
731 | )
732 |
733 | # extract only the "true" comments, distinct from ROI packets
734 | trueComments = np.setdiff1d(
735 | list(range(0, len(commentPackets) - 1)), ROIPackets
736 | )
737 | trueCommentsidx = np.asarray(commentPackets)[trueComments]
738 | textComments = comments[trueCommentsidx]
739 | textComments[:, -1] = "$"
740 | stringarray = textComments.tobytes()  # tostring() was removed from newer numpy
741 | stringvector = stringarray.decode("latin-1")
742 | stringvector = stringvector[0:-1]
743 | validstrings = stringvector.replace("\x00", "")
744 | commentsFinal = validstrings.split("$")
745 |
746 | # Remove the ROI comments from the list
747 | subsetInds = list(
748 | set(list(range(0, len(charSetList) - 1))) - set(ROIPackets)
749 | )
750 |
751 | output["comments"] = {
752 | "TimeStamps": list(ts[trueCommentsidx]),
753 | "TimeStampsStarted": list(tsStarted[trueCommentsidx]),
754 | "Data": commentsFinal,
755 | "CharSet": list(charSetList[subsetInds]),
756 | }
757 |
758 | # parsing and outputting ROI events
759 | if len(ROIPackets) > 0:
760 | nmPackets = np.asarray(ROIPackets)
761 | nmCommentsidx = np.asarray(commentPackets)[ROIPackets]
762 | nmcomments = comments[nmCommentsidx]
763 | nmcomments[:, -1] = ":"
764 | nmstringarray = nmcomments.tobytes()  # tostring() was removed from newer numpy
765 | nmstringvector = nmstringarray.decode("latin-1")
766 | nmstringvector = nmstringvector[0:-1]
767 | nmvalidstrings = nmstringvector.replace("\x00", "")
768 | nmcommentsFinal = nmvalidstrings.split(":")
769 | ROIfields = [l.split(":") for l in ":".join(nmcommentsFinal).split(":")]
770 | ROIfieldsRS = np.reshape(ROIfields, (len(ROIPackets), 5))
771 | output["tracking_events"] = {
772 | "TimeStamps": list(ts[nmCommentsidx]),
773 | "ROIName": list(ROIfieldsRS[:, 0]),
774 | "ROINumber": list(ROIfieldsRS[:, 1]),
775 | "Event": list(ROIfieldsRS[:, 2]),
776 | "Frame": list(ROIfieldsRS[:, 3]),
777 | }
778 |
779 | # NeuroMotive video synchronization packets
780 | vidsyncPackets = [
781 | idx
782 | for idx, element in enumerate(PacketID)
783 | if element == VIDEO_SYNC_PACKET_ID
784 | ]
785 | if len(vidsyncPackets) > 0:
786 | fileNumber = np.ndarray(
787 | (nPackets,),
788 | " 0:
827 | trackerObjs = [
828 | sub["VideoSource"]
829 | for sub in self.extended_headers
830 | if sub["PacketID"] == "TRACKOBJ"
831 | ]
832 | trackerIDs = [
833 | sub["TrackableID"]
834 | for sub in self.extended_headers
835 | if sub["PacketID"] == "TRACKOBJ"
836 | ]
837 | output["tracking"] = {
838 | "TrackerIDs": trackerIDs,
839 | "TrackerTypes": [
840 | sub["TrackableType"]
841 | for sub in self.extended_headers
842 | if sub["PacketID"] == "TRACKOBJ"
843 | ],
844 | }
845 | parentID = np.ndarray(
846 | (nPackets,),
847 | " 0:
937 | trigType = np.ndarray(
938 | (nPackets,),
939 | " 0:
956 | changeType = np.ndarray(
957 | (nPackets,),
958 | "= 3.x with PTP timestamping.
1093 | :return: output: {Dictionary} of: data_headers: {list} dictionaries of all data headers, 1 per segment
1094 | [seg_id]["Timestamp"]: timestamps of each sample in segment
1095 | if full_timestamps, else timestamp of first sample in segment
1096 | [seg_id]["NumDataPoints"]: number of samples in segment
1097 | [seg_id]["data_time_s"]: duration in segment
1098 | elec_ids: {list} elec_ids that were extracted (sorted)
1099 | start_time_s: {float} starting time for data extraction
1100 | data_time_s: {float} length of time of data returned
1101 | downsample: {int} data downsampling factor
1102 | samp_per_s: {float} output data samples per second
1103 | data: {numpy array} continuous data in a 2D elec x samps numpy array
1104 | (or samps x elec if elec_rows is False).
1105 | """
1106 | # Safety checks
1107 | start_time_s = check_starttime(start_time_s)
1108 | data_time_s = check_datatime(data_time_s)
1109 | downsample = check_downsample(downsample)
1110 | elec_ids = check_elecid(elec_ids)
1111 | if zeropad and self.basic_header["TimeStampResolution"] == 1e9:
1112 | print("zeropad does not work with ptp-timestamped data. Ignoring zeropad argument.\n")
1113 | zeropad = False
1114 | if force_srate and self.basic_header["TimeStampResolution"] != 1e9:
1115 | print("force_srate only works with ptp timestamps in filespec >= 3.x. Ignoring force_srate argument.\n")
1116 | force_srate = False
1117 |
1118 | # initialize parameters
1119 | output = dict()
1120 | output["start_time_s"] = float(start_time_s)
1121 | output["data_time_s"] = data_time_s
1122 | output["downsample"] = downsample
1123 | output["elec_ids"] = []
1124 | output["data_headers"] = [] # List of dicts with fields Timestamp, NumDataPoints, data_time_s, BoH, BoD
1125 | output["data"] = [] # List of ndarrays
1126 | output["samp_per_s"] = self.basic_header["SampleResolution"] / self.basic_header["Period"]
1127 |
1128 | # Pull some useful variables from the basic_header
1129 | data_pt_size = self.basic_header["ChannelCount"] * DATA_BYTE_SIZE
1130 | clk_per_samp = self.basic_header["Period"] * self.basic_header["TimeStampResolution"] / self.basic_header["SampleResolution"]
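| # timestamp clock ticks per sample; for PTP files TimeStampResolution is
| # 1e9, so Period 1 at 30 kS/s -> 1e9 / 30000 ~ 33333.3 ns per sample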
1131 | filespec_maj, filespec_min = tuple([int(_) for _ in self.basic_header["FileSpec"].split(".")][:2])
1132 |
1133 | # Timestamp is 64-bit for filespec >= 3.0
1134 | ts_type, ts_size = ("<Q", 8) if filespec_maj > 2 else ("<I", 4)
| [... lines 1135-1189 missing from this dump: the non-PTP data-header scan for filespec 2.x files, the end-of-header (eoh) and end-of-file (eof) offsets, and the ptp_dt packet dtype definition used below ...]
1190 | if filespec_maj >= 3:
1191 | # Start by assuming that these files are from firmware >= 7.6, thus we have 1 sample per packet.
1192 | npackets = int((eof - eoh) / np.dtype(ptp_dt).itemsize)
1193 | struct_arr = np.memmap(self.datafile, dtype=ptp_dt, shape=npackets, offset=eoh, mode="r")
1194 | self.datafile.seek(eoh, 0) # Reset to end-of-header in case memmap moved the pointer.
1195 | samp_per_pkt = np.all(struct_arr["num_data_points"] == 1) # Confirm 1 sample per packet
1196 |
1197 | if not samp_per_pkt:
1198 | # Multiple samples per packet; 1 packet == 1 uninterrupted segment.
1199 | while 0 < self.datafile.tell() < ospath.getsize(self.datafile.name):
1200 | # boh = self.datafile.tell() # Beginning of segment header
1201 | self.datafile.seek(1, 1) # Skip the reserved 0x01
1202 | timestamp = unpack(ts_type, self.datafile.read(ts_size))[0]
1203 | num_data_pts = unpack("<I", self.datafile.read(4))[0]
| [... lines 1204-1224 missing from this dump: appending the segment header (Timestamp, NumDataPoints, data_time_s), reading the segment samples, and computing expected_loc for the next packet ...]
1225 | if self.datafile.tell() > expected_loc:
1226 | # Moved it too far (probably to end of file); move manually from beginning to expected.
1227 | self.datafile.seek(expected_loc, 0)
1228 | else:
1229 | # 1 sample per packet. Reuse struct_arr.
1230 | seg_thresh_clk = 2 * clk_per_samp
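| # a gap of more than two sample periods between successive PTP timestamps
| # is treated as a pause in the recording, i.e., the start of a new segment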
1231 | seg_starts = np.hstack((0, 1 + np.argwhere(np.diff(struct_arr["timestamps"]) > seg_thresh_clk).flatten()))
1232 | for seg_ix, seg_start_idx in enumerate(seg_starts):
1233 | seg_stop_idx = seg_starts[seg_ix + 1] if seg_ix < (len(seg_starts) - 1) else (len(struct_arr) - 1)
1234 | offset = eoh + seg_start_idx * struct_arr.dtype.itemsize
1235 | num_data_pts = seg_stop_idx - seg_start_idx
1236 | seg_struct_arr = np.memmap(self.datafile, dtype=ptp_dt, shape=num_data_pts, offset=offset, mode="r")
1237 | output["data_headers"].append({
1238 | "Timestamp": seg_struct_arr["timestamps"],
1239 | "NumDataPoints": num_data_pts,
1240 | "data_time_s": num_data_pts / output["samp_per_s"]
1241 | })
1242 | output["data"].append(seg_struct_arr["samples"])
1243 |
1244 | ## Post-processing ##
1245 |
1246 | # Drop segments that are not within the requested time window
1247 | ts_0 = output["data_headers"][0]["Timestamp"][0]
1248 | start_time_ts = start_time_s * self.basic_header["TimeStampResolution"]
1249 | test_start_ts = ts_0 + start_time_ts
1250 | test_stop_ts = np.inf # Will update below
1251 | if start_time_s != START_TIME_DEF:
1252 | # Keep segments with at least one sample on-or-after test_start_ts
1253 | b_keep = [_["Timestamp"][-1] >= test_start_ts for _ in output["data_headers"]]
1254 | output["data_headers"] = [_ for _, b in zip(output["data_headers"], b_keep) if b]
1255 | output["data"] = [_ for _, b in zip(output["data"], b_keep) if b]
1256 | if data_time_s != DATA_TIME_DEF:
1257 | # Keep segments with at least one sample on-or-before test_stop_ts
1258 | data_time_ts = data_time_s * self.basic_header["TimeStampResolution"]
1259 | test_stop_ts = test_start_ts + data_time_ts
1260 | b_keep = [_["Timestamp"][0] <= test_stop_ts for _ in output["data_headers"]]
1261 | output["data_headers"] = [_ for _, b in zip(output["data_headers"], b_keep) if b]
1262 | output["data"] = [_ for _, b in zip(output["data"], b_keep) if b]
1263 |
1264 | # Post-process segments for start_time_s, data_time_s, zeropad
1265 | for ix, data_header in enumerate(output["data_headers"]):
1266 | data = output["data"][ix]
1267 | # start_time_s and data_time_s
1268 | b_keep = np.ones((data.shape[0],), dtype=bool)
1269 | if start_time_s > START_TIME_DEF and data_header["Timestamp"][0] < test_start_ts:
1270 | # if segment begins before test_start_ts, slice it to begin at test_start_ts.
1271 | b_keep &= data_header["Timestamp"] >= test_start_ts
1272 | if data_time_s != DATA_TIME_DEF and data_header["Timestamp"][-1] > test_stop_ts:
1273 | # if segment finishes after start_time_s + data_time_s, slice it to finish at start_time_s + data_time_s
1274 | b_keep &= data_header["Timestamp"] <= test_stop_ts
1275 | if np.any(~b_keep):
1276 | data_header["Timestamp"] = data_header["Timestamp"][b_keep]
1277 | data = data[b_keep]
1278 |
1279 | # zeropad: Prepend the data with zeros so its first timestamp is nsp_time=0.
1280 | if ix == 0 and zeropad and data_header["Timestamp"][0] != 0:
1281 | # Calculate how many samples we need.
1282 | padsize = ceil(data_header["Timestamp"][0] / self.basic_header["Period"])
1283 | pad_dat = np.zeros((padsize, data.shape[1]), dtype=data.dtype)
1284 | # Stack pad_dat in front of output["data"][ix]. Slow! Might run out of memory!
1285 | try:
1286 | data = np.vstack((pad_dat, data))
1287 | except MemoryError as err:
1288 | err.args += (
1289 | " Output data size requested is larger than available memory. Use the parameters\n"
1290 | " for getdata(), e.g., 'elec_ids', to request a subset of the data or use\n"
1291 | " NsxFile.savesubsetnsx() to create subsets of the main nsx file\n",
1292 | )
1293 | raise
1294 | pad_ts = data_header["Timestamp"][0] - (clk_per_samp * np.arange(1, padsize + 1)).astype(np.int64)[::-1]
1295 | data_header["Timestamp"] = np.hstack((pad_ts, data_header["Timestamp"]))
1296 |
1297 | # force_srate: Force the returned arrays to have exactly the expected number of samples per elapsed ptp time.
1298 | if force_srate:
1299 | # Dur of segment in ts-clks (nanoseconds)
1300 | seg_clks = data_header["Timestamp"][-1] - data_header["Timestamp"][0] + np.uint64(clk_per_samp)
1301 | # Number of samples in segment
1302 | npoints = data.shape[0]
1303 | # Expected number of samples based on duration.
1304 | n_expected = seg_clks / clk_per_samp
1305 | # How many are we missing? -ve number means we have too many.
1306 | n_insert = int(np.round(n_expected - npoints))
1307 | # identify where in the segments the data should be added/removed
1308 | insert_inds = np.linspace(0, npoints, num=abs(n_insert) + 1, endpoint=False, dtype=int)[1:]
1309 | if n_insert > 0:
1310 | # Create interpolated samples at the evenly spaced insertion points, then insert.
1311 | insert_vals = (data[insert_inds] + data[insert_inds + 1]) / 2
1312 | data = np.insert(data, insert_inds, insert_vals, axis=0)
1313 | elif n_insert < 0:
1314 | data = np.delete(data, insert_inds, axis=0)
1315 |
1316 | # Replace data_header["Timestamp"] with ideal timestamps
1317 | data_header["Timestamp"] = data_header["Timestamp"][0] + (clk_per_samp * np.arange(data.shape[0])).astype(np.int64)
1318 |
1319 | if downsample > 1:
1320 | data = data[::downsample]
1321 |
1322 | data_header["NumDataPoints"] = data.shape[0]
1323 | data_header["data_time_s"] = data_header["NumDataPoints"] / output["samp_per_s"]
1324 |
1325 | if elec_rows:
1326 | data = data.T
1327 |
1328 | output["data"][ix] = data
1329 |
1330 | if not full_timestamps:
1331 | data_header["Timestamp"] = data_header["Timestamp"][0]
1332 |
1333 | return output
1334 |
1335 | def savesubsetnsx(
1336 | self, elec_ids="all", file_size=None, file_time_s=None, file_suffix=""
1337 | ):
1338 | """
1339 | This function is used to save a subset of data based on electrode IDs, file sizing, or file data time. If
1340 | both file_time_s and file_size are passed, it will default to file_time_s and determine sizing accordingly.
1341 |
1342 | :param elec_ids: [optional] {list} List of elec_ids to extract (e.g., [13])
1343 | :param file_size: [optional] {int} Byte size of each subset file to save (e.g., 1024**3 = 1 Gb). If nothing
1344 | is passed, file_size will be all data points.
1345 | :param file_time_s: [optional] {float} Time length of data for each subset file, in seconds (e.g. 60.0). If
1346 | nothing is passed, file_size will be used as default.
1347 | :param file_suffix: [optional] {str} Suffix to append to NSx datafile name for subset files. If nothing is
1348 | passed, default will be "_subset".
1349 | :return: None - None of the electrodes requested exist in the data
1350 | SUCCESS - All file subsets extracted and saved
1351 | """
1352 |
1353 | # Initializations
1354 | elec_id_indices = []
1355 | file_num = 1
1356 | pausing = False
1357 | datafile_datapt_size = self.basic_header["ChannelCount"] * DATA_BYTE_SIZE
1358 | self.datafile.seek(0, 0)
1359 |
1360 | # Run electrode id checks and set num_elecs
1361 | elec_ids = check_elecid(elec_ids)
1362 | if self.basic_header["FileSpec"] == "2.1":
1363 | all_elec_ids = self.basic_header["ChannelID"]
1364 | else:
1365 | all_elec_ids = [x["ElectrodeID"] for x in self.extended_headers]
1366 |
1367 | if elec_ids == ELEC_ID_DEF:
1368 | elec_ids = all_elec_ids
1369 | else:
1370 | elec_ids = check_dataelecid(elec_ids, all_elec_ids)
1371 | if not elec_ids:
1372 | return None
1373 | else:
1374 | elec_id_indices = [all_elec_ids.index(x) for x in elec_ids]
1375 |
1376 | num_elecs = len(elec_ids)
1377 |
1378 | # If file_size or file_time_s passed, check it and set file_sizing accordingly
1379 | if file_time_s:
1380 | if file_time_s and file_size:
1381 | print(
1382 | "\nWARNING: Only one of file_size or file_time_s can be passed, defaulting to file_time_s."
1383 | )
1384 | file_size = int(
1385 | num_elecs
1386 | * DATA_BYTE_SIZE
1387 | * file_time_s
1388 | * self.basic_header["TimeStampResolution"]
1389 | / self.basic_header["Period"]
1390 | )
1391 | if self.basic_header["FileSpec"] == "2.1":
1392 | file_size += 32 + 4 * num_elecs
1393 | else:
1394 | file_size += (
1395 | NSX_BASIC_HEADER_BYTES_22 + NSX_EXT_HEADER_BYTES_22 * num_elecs + 5
1396 | )
1397 | print(
1398 | "\nBased on timing request, file size will be {0:d} Mb".format(
1399 | int(file_size / 1024**2)
1400 | )
1401 | )
1402 | elif file_size:
1403 | file_size = check_filesize(file_size)
1404 |
1405 | # Create and open subset file as writable binary, if it already exists ask user for overwrite permission
1406 | file_name, file_ext = ospath.splitext(self.datafile.name)
1407 | if file_suffix:
1408 | file_name += "_" + file_suffix
1409 | else:
1410 | file_name += "_subset"
1411 |
1412 | if ospath.isfile(file_name + "_000" + file_ext):
1413 | if "y" != input(
1414 | "\nFile '"
1415 | + file_name.split("/")[-1]
1416 | + "_xxx"
1417 | + file_ext
1418 | + "' already exists, overwrite [y/n]: "
1419 | ):
1420 | print("\nExiting, no overwrite, returning None")
1421 | return None
1422 | else:
1423 | print("\n*** Overwriting existing subset files ***")
1424 |
1425 | subset_file = open(file_name + "_000" + file_ext, "wb")
1426 | print("\nWriting subset file: " + ospath.split(subset_file.name)[1])
1427 |
1428 | # For file spec 2.1:
1429 | # 1) copy the first 28 bytes from the datafile (these are unchanged)
1430 | # 2) write subset channel count and channel ID to file
1431 | # 3) skip ahead in datafile the number of bytes in datafile ChannelCount(4) plus ChannelID (4*ChannelCount)
1432 | if self.basic_header["FileSpec"] == "2.1":
1433 | subset_file.write(self.datafile.read(28))
1434 | subset_file.write(np.array(num_elecs).astype(np.uint32).tobytes())
1435 | subset_file.write(np.array(elec_ids).astype(np.uint32).tobytes())
1436 | self.datafile.seek(4 + 4 * self.basic_header["ChannelCount"], 1)
1437 |
1438 | # For file spec 2.2 and above
1439 | # 1) copy the first 10 bytes from the datafile (unchanged)
1440 | # 2) write subset bytes-in-headers and skip 4 bytes in datafile, noting position of this for update later
1441 | # 3) copy the next 296 bytes from datafile (unchanged)
1442 | # 4) write subset channel-count value and skip 4 bytes in datafile
1443 | # 5) append extended headers based on the channel ID. Must read the first 4 bytes, determine if correct
1444 | # Channel ID, repack first 4 bytes, write to disk, then copy remaining 62 (66-4) bytes
1445 | else:
1446 | subset_file.write(self.datafile.read(10))
1447 | bytes_in_headers = (
1448 | NSX_BASIC_HEADER_BYTES_22 + NSX_EXT_HEADER_BYTES_22 * num_elecs
1449 | )
1450 | num_pts_header_pos = bytes_in_headers + 5
1451 | subset_file.write(np.array(bytes_in_headers).astype(np.uint32).tobytes())
1452 | self.datafile.seek(4, 1)
1453 | subset_file.write(self.datafile.read(296))
1454 | subset_file.write(np.array(num_elecs).astype(np.uint32).tobytes())
1455 | self.datafile.seek(4, 1)
1456 |
1457 | for i in range(len(self.extended_headers)):
1458 | h_type = self.datafile.read(2)
1459 | chan_id = self.datafile.read(2)
1460 | if unpack(""
151 | ]
152 | },
153 | "metadata": {},
154 | "output_type": "display_data"
155 | }
156 | ],
157 | "source": [
158 | "plt.plot(t, cont_data[\"data\"][seg_id][ch_idx])\n",
159 | "plt.axis([t[0], t[-1], min(cont_data[\"data\"][seg_id][ch_idx]), max(cont_data[\"data\"][seg_id][ch_idx])])\n",
160 | "plt.locator_params(axis=\"y\", nbins=20)\n",
161 | "plt.xlabel(\"Time (s)\")\n",
162 | "# plt.ylabel(\"Output (\" + nsx_file.extended_headers[hdr_idx]['Units'] + \")\")\n",
163 | "# plt.title(nsx_file.extended_headers[hdr_idx]['ElectrodeLabel'])\n",
164 | "plt.show()"
165 | ]
166 | }
167 | ],
168 | "metadata": {
169 | "kernelspec": {
170 | "display_name": "brn_nsx",
171 | "language": "python",
172 | "name": "python3"
173 | },
174 | "language_info": {
175 | "codemirror_mode": {
176 | "name": "ipython",
177 | "version": 3
178 | },
179 | "file_extension": ".py",
180 | "mimetype": "text/x-python",
181 | "name": "python",
182 | "nbconvert_exporter": "python",
183 | "pygments_lexer": "ipython3",
184 | "version": "3.9.13"
185 | },
186 | "orig_nbformat": 4
187 | },
188 | "nbformat": 4,
189 | "nbformat_minor": 2
190 | }
191 |
--------------------------------------------------------------------------------
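A companion sketch for event files (hypothetical file name; the keys follow the NevFile.getdata() docstring in brpylib/brpylib.py):

    from brpylib import NevFile

    nev_file = NevFile("sample_data.nev")
    ev = nev_file.getdata(elec_ids=[13], wave_read="read")
    spikes = ev.get("spike_events", {})       # only present if the file contains spikes
    print(spikes.get("TimeStamps", [])[:10])  # first ten spike timestamps
    nev_file.close()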
/examples/save_subset_nsx.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Example of how to extract data from a Blackrock Nsx data file and save new subset Nsx data files
4 | current version: 1.1.1 --- 07/22/2016
5 |
6 | @author: Mitch Frankel - Blackrock Microsystems
7 | """
8 |
9 | """
10 | Version History:
11 | v1.0.0 - 07/08/2016 - initial release - requires brpylib v1.1.0 or higher
12 | v1.1.0 - 07/12/2016 - addition of version checking for brpylib starting with v1.2.0
13 | v1.1.1 - 07/22/2016 - minor modifications to use close() functionality of NsxFile class
14 | """
15 |
16 | # Imports
17 | from brpylib import NsxFile, brpylib_ver
18 |
19 | # Version control
20 | brpylib_ver_req = "1.2.1"
21 | if [int(x) for x in brpylib_ver.split('.')] < [int(x) for x in brpylib_ver_req.split('.')]:  # compare numerically, not as strings
22 | raise Exception("requires brpylib " + brpylib_ver_req + " or higher, please use latest version")
23 |
24 | # Inits
25 | datafile = 'D:/Dropbox/BlackrockDB/software/sampledata/The Most Perfect Data in the WWWorld/' \
26 | 'sampleData.ns6'
27 |
28 | # Open file and extract headers
29 | brns_file = NsxFile(datafile)
30 |
31 | # save a subset of data based on elec_ids
32 | brns_file.savesubsetnsx(elec_ids=[1, 2, 5, 15, 20, 200], file_suffix='elec_subset')
33 |
34 | # save a subset of data based on file sizing (100 Mb)
35 | brns_file.savesubsetnsx(file_size=(1024**2) * 100, file_suffix='size_subset')
36 |
37 | # save a subset of data based on file timing
38 | brns_file.savesubsetnsx(file_time_s=30, file_suffix='time_subset')
39 |
40 | # save a subset of data based on elec_ids and timing
41 | brns_file.savesubsetnsx(elec_ids=[1, 2, 5, 15, 20, 200], file_time_s=30, file_suffix='elecAndTime_subset')
42 |
43 | # Close the original datafile
44 | brns_file.close()
45 |
--------------------------------------------------------------------------------
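A follow-up sketch: subset files are ordinary NSx files named <base>_<suffix>_NNN.<ext>, so they can be re-opened with NsxFile to verify the result (names below are hypothetical):

    from brpylib import NsxFile

    subset = NsxFile("sampleData_elec_subset_000.ns6")
    print(subset.basic_header["ChannelCount"])  # should equal the number of kept elec_ids
    subset.close()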
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=61.0.0", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "brpylib"
7 | description = "Blackrock Neurotech Python utilities"
8 | readme = "README.md"
9 | authors = [{ name = "Blackrock Neurotech", email = "support@blackrockneuro.com" }]
10 |
11 | dependencies = [
12 | "numpy",
13 | ]
14 | dynamic = ["version"]
15 |
16 | [project.optional-dependencies]
17 | dev = [
18 | "matplotlib",
19 | "qtpy",
20 | "pyside6_essentials; python_version>='3.6'",
21 | "pyside2; python_version<'3.6'",
22 | "jupyterlab"
23 | ]
24 | test = [
25 | "pytest",
26 | ]
27 |
28 | [project.urls]
29 | Repository = "https://github.com/BlackrockNeurotech/Python-Utilities"
30 | Homepage = "https://blackrockneurotech.com/research/support/#manuals-and-software-downloads"
31 |
32 | [tool.setuptools.dynamic]
33 | version = {attr = "brpylib.brpylib.brpylib_ver"}
34 |
35 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | setup()
4 |
--------------------------------------------------------------------------------