Skip to content

Commit 72ec76a

Browse files
h-mayorquin and zm711
authored
Blackrock add summary of automatic data segmentation (#1769)
* blackrock improved message * improve error further * typo * variable naming * another typo * another typo * readability * fix error version II the revenge --------- Co-authored-by: Zach McKenzie <[email protected]>
1 parent fc6997c commit 72ec76a

File tree

1 file changed

+86
-34
lines changed

1 file changed

+86
-34
lines changed

neo/rawio/blackrockrawio.py

Lines changed: 86 additions & 34 deletions
Original file line number | Diff line number | Diff line change
@@ -323,13 +323,25 @@ def _parse_header(self):
323323
self._nsx_basic_header = {}
324324
self._nsx_ext_header = {}
325325
self._nsx_data_header = {}
326+
self._nsx_sampling_frequency = {}
326327

328+
# Read headers
327329
for nsx_nb in self._avail_nsx:
328330
spec_version = self._nsx_spec[nsx_nb] = self._extract_nsx_file_spec(nsx_nb)
329331
# read nsx headers
330332
nsx_header_reader = self._nsx_header_reader[spec_version]
331333
self._nsx_basic_header[nsx_nb], self._nsx_ext_header[nsx_nb] = nsx_header_reader(nsx_nb)
334+
335+
# The Blackrock format defines the period as the number of 1/30_000 seconds between data points
336+
# E.g. it is 1 for 30_000, 3 for 10_000, etc
337+
nsx_period = self._nsx_basic_header[nsx_nb]["period"]
338+
sampling_rate = 30_000.0 / nsx_period
339+
self._nsx_sampling_frequency[nsx_nb] = float(sampling_rate)
332340

341+
342+
# Parse data packets
343+
for nsx_nb in self._avail_nsx:
344+
333345
# The only way to know if it is the Precision Time Protocol of file spec 3.0
334346
# is to check for nanosecond timestamp resolution.
335347
is_ptp_variant = (
@@ -386,7 +398,8 @@ def _parse_header(self):
386398
self._match_nsx_and_nev_segment_ids(nsx_nb)
387399

388400
self.nsx_datas = {}
389-
self.sig_sampling_rates = {}
401+
# Keep public attribute for backward compatibility but let's use the private one and maybe deprecate this at some point
402+
self.sig_sampling_rates = {nsx_number: self._nsx_sampling_frequency[nsx_number] for nsx_number in self.nsx_to_load}
390403
if len(self.nsx_to_load) > 0:
391404
for nsx_nb in self.nsx_to_load:
392405
basic_header = self._nsx_basic_header[nsx_nb]
@@ -403,8 +416,7 @@ def _parse_header(self):
403416
_data_reader_fun = self._nsx_data_reader[spec_version]
404417
self.nsx_datas[nsx_nb] = _data_reader_fun(nsx_nb)
405418

406-
sr = float(self.main_sampling_rate / basic_header["period"])
407-
self.sig_sampling_rates[nsx_nb] = sr
419+
sr = self._nsx_sampling_frequency[nsx_nb]
408420

409421
if spec_version in ["2.2", "2.3", "3.0"]:
410422
ext_header = self._nsx_ext_header[nsx_nb]
@@ -473,7 +485,7 @@ def _parse_header(self):
473485
length = self.nsx_datas[nsx_nb][data_bl].shape[0]
474486
if self._nsx_data_header[nsx_nb] is None:
475487
t_start = 0.0
476-
t_stop = max(t_stop, length / self.sig_sampling_rates[nsx_nb])
488+
t_stop = max(t_stop, length / self._nsx_sampling_frequency[nsx_nb])
477489
else:
478490
timestamps = self._nsx_data_header[nsx_nb][data_bl]["timestamp"]
479491
if hasattr(timestamps, "size") and timestamps.size == length:
@@ -482,7 +494,7 @@ def _parse_header(self):
482494
t_stop = max(t_stop, timestamps[-1] / ts_res + sec_per_samp)
483495
else:
484496
t_start = timestamps / ts_res
485-
t_stop = max(t_stop, t_start + length / self.sig_sampling_rates[nsx_nb])
497+
t_stop = max(t_stop, t_start + length / self._nsx_sampling_frequency[nsx_nb])
486498
self._sigs_t_starts[nsx_nb].append(t_start)
487499

488500
if self._avail_files["nev"]:
@@ -1041,43 +1053,83 @@ def _read_nsx_dataheader_spec_v30_ptp(
10411053
filesize = self._get_file_size(filename)
10421054

10431055
data_header = {}
1044-
index = 0
1045-
10461056
if offset is None:
10471057
# This is read as an uint32 numpy scalar from the header so we transform it to python int
1048-
offset = int(self._nsx_basic_header[nsx_nb]["bytes_in_headers"])
1058+
header_size = int(self._nsx_basic_header[nsx_nb]["bytes_in_headers"])
1059+
else:
1060+
header_size = offset
10491061

10501062
ptp_dt = [
10511063
("reserved", "uint8"),
10521064
("timestamps", "uint64"),
10531065
("num_data_points", "uint32"),
1054-
("samples", "int16", self._nsx_basic_header[nsx_nb]["channel_count"]),
1066+
("samples", "int16", (self._nsx_basic_header[nsx_nb]["channel_count"],)),
10551067
]
1056-
npackets = int((filesize - offset) / np.dtype(ptp_dt).itemsize)
1057-
struct_arr = np.memmap(filename, dtype=ptp_dt, shape=npackets, offset=offset, mode="r")
1068+
npackets = int((filesize - header_size) / np.dtype(ptp_dt).itemsize)
1069+
struct_arr = np.memmap(filename, dtype=ptp_dt, shape=npackets, offset=header_size, mode="r")
10581070

10591071
if not np.all(struct_arr["num_data_points"] == 1):
10601072
# some packets have more than 1 sample. Not actually ptp. Revert to non-ptp variant.
1061-
return self._read_nsx_dataheader_spec_v22_30(nsx_nb, filesize=filesize, offset=offset)
1062-
1063-
# It is still possible there was a data break and the file has multiple segments.
1064-
# We can no longer rely on the presence of a header indicating a new segment,
1065-
# so we look for timestamp differences greater than double the expected interval.
1066-
_period = self._nsx_basic_header[nsx_nb]["period"] # 30_000 ^-1 s per sample
1067-
_nominal_rate = 30_000 / _period # samples per sec; maybe 30_000 should be ["sample_resolution"]
1068-
_clock_rate = self._nsx_basic_header[nsx_nb]["timestamp_resolution"] # clocks per sec
1069-
clk_per_samp = _clock_rate / _nominal_rate # clk/sec / smp/sec = clk/smp
1070-
seg_thresh_clk = int(2 * clk_per_samp)
1071-
seg_starts = np.hstack((0, 1 + np.argwhere(np.diff(struct_arr["timestamps"]) > seg_thresh_clk).flatten()))
1072-
for seg_ix, seg_start_idx in enumerate(seg_starts):
1073-
if seg_ix < (len(seg_starts) - 1):
1074-
seg_stop_idx = seg_starts[seg_ix + 1]
1075-
else:
1076-
seg_stop_idx = len(struct_arr) - 1
1077-
seg_offset = offset + seg_start_idx * struct_arr.dtype.itemsize
1078-
num_data_pts = seg_stop_idx - seg_start_idx
1073+
return self._read_nsx_dataheader_spec_v22_30(nsx_nb, filesize=filesize, offset=header_size)
1074+
1075+
1076+
# Segment the data. At the moment we split wherever the data has gaps that are longer
1077+
# than twice the sampling period.
1078+
sampling_rate = self._nsx_sampling_frequency[nsx_nb]
1079+
segmentation_threshold = 2.0 / sampling_rate
1080+
1081+
# The raw timestamps are the indices of an ideal clock that ticks at `timestamp_resolution` times per second.
1082+
# We convert these indices to actual timestamps in seconds
1083+
raw_timestamps = struct_arr["timestamps"]
1084+
timestamps_sampling_rate = self._nsx_basic_header[nsx_nb]["timestamp_resolution"] # clocks per sec uint64 or uint32
1085+
timestamps_in_seconds = raw_timestamps / timestamps_sampling_rate
1086+
1087+
time_differences = np.diff(timestamps_in_seconds)
1088+
gap_indices = np.argwhere(time_differences > segmentation_threshold).flatten()
1089+
segment_starts = np.hstack((0, 1 + gap_indices))
1090+
1091+
# Report gaps if any are found
1092+
if len(gap_indices) > 0:
1093+
import warnings
1094+
threshold_ms = segmentation_threshold * 1000
1095+
1096+
# Calculate all gap details in vectorized operations
1097+
gap_durations_seconds = time_differences[gap_indices]
1098+
gap_durations_ms = gap_durations_seconds * 1000
1099+
gap_positions_seconds = timestamps_in_seconds[gap_indices] - timestamps_in_seconds[0]
1100+
1101+
# Build gap detail lines all at once
1102+
gap_detail_lines = [
1103+
f"| {index:>15,} | {pos:>21.6f} | {dur:>21.3f} |\n"
1104+
for index, pos, dur in zip(gap_indices, gap_positions_seconds, gap_durations_ms)
1105+
]
1106+
1107+
segmentation_report_message = (
1108+
f"\nFound {len(gap_indices)} gaps for nsx {nsx_nb} where samples are farther apart than {threshold_ms:.3f} ms.\n"
1109+
f"Data will be segmented at these locations to create {len(segment_starts)} segments.\n\n"
1110+
"Gap Details:\n"
1111+
"+-----------------+-----------------------+-----------------------+\n"
1112+
"| Sample Index | Sample at | Gap Jump |\n"
1113+
"| | (Seconds) | (Milliseconds) |\n"
1114+
"+-----------------+-----------------------+-----------------------+\n"
1115+
+ ''.join(gap_detail_lines) +
1116+
"+-----------------+-----------------------+-----------------------+\n"
1117+
)
1118+
warnings.warn(segmentation_report_message)
1119+
1120+
# Calculate all segment boundaries and derived values in one operation
1121+
segment_boundaries = list(segment_starts) + [len(struct_arr) - 1]
1122+
segment_num_data_points = [segment_boundaries[i+1] - segment_boundaries[i] for i in range(len(segment_starts))]
1123+
1124+
size_of_data_block = struct_arr.dtype.itemsize
1125+
segment_offsets = [header_size + pos * size_of_data_block for pos in segment_starts]
1126+
1127+
num_segments = len(segment_starts)
1128+
for segment_index in range(num_segments):
1129+
seg_offset = segment_offsets[segment_index]
1130+
num_data_pts = segment_num_data_points[segment_index]
10791131
seg_struct_arr = np.memmap(filename, dtype=ptp_dt, shape=num_data_pts, offset=seg_offset, mode="r")
1080-
data_header[seg_ix] = {
1132+
data_header[segment_index] = {
10811133
"header": None,
10821134
"timestamp": seg_struct_arr["timestamps"], # Note, this is an array, not a scalar
10831135
"nb_data_points": num_data_pts,
@@ -1089,7 +1141,7 @@ def _read_nsx_data_spec_v21(self, nsx_nb):
10891141
"""
10901142
Extract nsx data from a 2.1 .nsx file
10911143
"""
1092-
filename = ".".join([self._filenames["nsx"], f"ns{nsx_nb}"])
1144+
filename = f"{self._filenames['nsx']}.ns{nsx_nb}"
10931145

10941146
# get shape of data
10951147
shape = (
@@ -1132,13 +1184,13 @@ def _read_nsx_data_spec_v30_ptp(self, nsx_nb):
11321184
yielding a timestamp per sample. Blocks can arise
11331185
if the recording was paused by the user.
11341186
"""
1135-
filename = ".".join([self._filenames["nsx"], f"ns{nsx_nb}"])
1187+
filename = f"{self._filenames['nsx']}.ns{nsx_nb}"
11361188

11371189
ptp_dt = [
11381190
("reserved", "uint8"),
11391191
("timestamps", "uint64"),
11401192
("num_data_points", "uint32"),
1141-
("samples", "int16", self._nsx_basic_header[nsx_nb]["channel_count"]),
1193+
("samples", "int16", (self._nsx_basic_header[nsx_nb]["channel_count"],)),
11421194
]
11431195

11441196
data = {}
@@ -1161,7 +1213,7 @@ def _read_nev_header(self, ext_header_variants):
11611213
"""
11621214
Extract nev header information from a of specific .nsx header variant
11631215
"""
1164-
filename = ".".join([self._filenames["nev"], "nev"])
1216+
filename = f"{self._filenames['nev']}.nev"
11651217

11661218
# basic header
11671219
dt0 = [

0 commit comments

Comments
 (0)