From 1de6d171bf620db56a6056daf707ffa6e7d0bc2c Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 9 Dec 2024 09:19:12 +0200 Subject: [PATCH 01/60] get bits --- src/asammdf/blocks/cutils.c | 47 ++++++++++++++++++++++++++++++++++++ src/asammdf/blocks/mdf_v4.py | 9 ++++++- 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 5f2e6ac25..d8daa3bfb 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1312,6 +1312,52 @@ static PyObject *get_channel_raw_bytes(PyObject *self, PyObject *args) } } +static PyObject *get_invalidation_bits(PyObject *self, PyObject *args) +{ + Py_ssize_t count, size, actual_byte_count, delta, ch_invalidation_pos, invalidation_size,; + PyObject *data_block, *out; + + Py_ssize_t record_size, byte_offset, byte_count; + + uint8_t mask; + + char *inptr, *outptr; + + if (!PyArg_ParseTuple(args, "Onnn", &data_block, &invalidation_size, &invalidation_pos)) + { + return 0; + } + else + { + if (PyBytes_Check(data_block)) { + size = PyBytes_Size(data_block); + inptr = PyBytes_AsString(data_block); + } + else { + size = PyByteArray_Size(data_block); + inptr = PyByteArray_AsString(data_block); + } + + count = size / invalidation_size; + byte_offset = invalidation_pos / 8; + mask = (uint8_t ) (1 << (invalidation_pos % 8)); + + inptr += byte_offset; + + npy_intp dims[1]; + dims[0] = count; + out = (PyArrayObject *)PyArray_EMPTY(1, dims, NPY_BOOL, 0); + outptr = (uint8_t *)PyArray_GETPTR1(out, 0); + + for (int i=0; i 90: + 1/0 - _master = self.get_master(index, data=fragments[master_index]) + _master = self.get_master(index, data=fragments[master_index], one_piece=True) self._set_temporary_master(_master) if idx == 0: From acd0fc579182cfd3b776b44cb95f412eb465952a Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 9 Dec 2024 09:53:02 +0200 Subject: [PATCH 02/60] covnert test --- src/asammdf/blocks/cutils.c | 16 ++++++------- src/asammdf/blocks/mdf_v4.py | 45 
++++++++++++++++++++++++------------ 2 files changed, 37 insertions(+), 24 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index d8daa3bfb..05efe48f4 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1312,18 +1312,16 @@ static PyObject *get_channel_raw_bytes(PyObject *self, PyObject *args) } } -static PyObject *get_invalidation_bits(PyObject *self, PyObject *args) +static PyObject *get_invalidation_bits_array(PyObject *self, PyObject *args) { - Py_ssize_t count, size, actual_byte_count, delta, ch_invalidation_pos, invalidation_size,; + Py_ssize_t count, size, actual_byte_count, delta, invalidation_pos, invalidation_size; PyObject *data_block, *out; Py_ssize_t record_size, byte_offset, byte_count; - uint8_t mask; + uint8_t mask, *inptr, *outptr; - char *inptr, *outptr; - - if (!PyArg_ParseTuple(args, "Onnn", &data_block, &invalidation_size, &invalidation_pos)) + if (!PyArg_ParseTuple(args, "Onn", &data_block, &invalidation_size, &invalidation_pos)) { return 0; } @@ -1331,11 +1329,11 @@ static PyObject *get_invalidation_bits(PyObject *self, PyObject *args) { if (PyBytes_Check(data_block)) { size = PyBytes_Size(data_block); - inptr = PyBytes_AsString(data_block); + inptr = (uint8_t *)PyBytes_AsString(data_block); } else { size = PyByteArray_Size(data_block); - inptr = PyByteArray_AsString(data_block); + inptr = (uint8_t *)PyByteArray_AsString(data_block); } count = size / invalidation_size; @@ -1596,7 +1594,7 @@ static PyMethodDef myMethods[] = { {"sort_data_block", sort_data_block, METH_VARARGS, "sort raw data group block"}, {"positions", positions, METH_VARARGS, "positions"}, {"get_channel_raw_bytes", get_channel_raw_bytes, METH_VARARGS, "get_channel_raw_bytes"}, - {"get_invalidation_bits", get_invalidation_bits, METH_VARARGS, "get_invalidation_bits"}, + {"get_invalidation_bits_array", get_invalidation_bits_array, METH_VARARGS, "get_invalidation_bits_array"}, {"data_block_from_arrays", 
data_block_from_arrays, METH_VARARGS, "data_block_from_arrays"}, {"get_idx_with_edges", get_idx_with_edges, METH_VARARGS, "get_idx_with_edges"}, {"reverse_transposition", reverse_transposition, METH_VARARGS, "reverse_transposition"}, diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 47c444938..52b578bc3 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -177,6 +177,7 @@ get_channel_raw_bytes, get_vlsd_max_sample_size, sort_data_block, +get_invalidation_bits_array, ) @@ -1381,6 +1382,9 @@ def _load_data( ) -> Iterator[tuple[bytes, int, int, bytes | None]]: """get group's data block bytes""" + from time import perf_counter + cc = 0 + offset = 0 invalidation_offset = 0 has_yielded = False @@ -1461,6 +1465,9 @@ def _load_data( cur_invalidation_size = 0 invalidation_data = [] + tt = perf_counter() + ss = 0 + while True: try: info = next(blocks) @@ -1524,6 +1531,9 @@ def _load_data( seek(address) new_data = read(compressed_size) + cc += 1 + ss += original_size + if block_type == v4c.DZ_BLOCK_DEFLATE: new_data = decompress(new_data, bufsize=original_size) elif block_type == v4c.DZ_BLOCK_TRANSPOSED: @@ -1685,6 +1695,12 @@ def _load_data( invalidation_data.append(new_invalidation_data) cur_invalidation_size += inv_size + if vv := (perf_counter() - tt) > 5: + print(f'{ss / 1024/1024 / vv:.3f} MB/s {cc=}') + cc = 0 + ss = 0 + tt = perf_counter() + if cur_size: data_ = buffer[:cur_size] if rm and invalidation_size: @@ -2603,12 +2619,14 @@ def get_invalidation_bits( group = self.groups[group_index] data_bytes, offset, _count, invalidation_bytes = fragment - try: - invalidation = self._invalidation_cache[(group_index, offset, _count)] - except KeyError: - size = group.channel_group.invalidation_bytes_nr + invalidation_bytes_nr = group.channel_group.invalidation_bytes_nr + ch_invalidation_pos = channel.pos_invalidation_bit + + if invalidation_bytes is None: + try: + invalidation_bytes = 
self._invalidation_cache[(group_index, offset, _count)] + except KeyError: - if invalidation_bytes is None: record = group.record if record is None: self._prepare_record(group) @@ -2617,19 +2635,16 @@ def get_invalidation_bits( data_bytes, group.channel_group.samples_byte_nr + group.channel_group.invalidation_bytes_nr, group.channel_group.samples_byte_nr, - size, + invalidation_bytes_nr, ) - invalidation = frombuffer(invalidation_bytes, dtype=f"({size},)u1") - self._invalidation_cache[(group_index, offset, _count)] = invalidation + self._invalidation_cache[(group_index, offset, _count)] = invalidation_bytes - ch_invalidation_pos = channel.pos_invalidation_bit - pos_byte, pos_offset = divmod(ch_invalidation_pos, 8) - - mask = 1 << pos_offset - - invalidation_bits = invalidation[:, pos_byte] & mask - invalidation_bits = invalidation_bits.view(bool) + invalidation_bits = get_invalidation_bits_array( + invalidation_bytes, + invalidation_bytes_nr, + ch_invalidation_pos + ) return InvalidationArray(invalidation_bits, (group_index, ch_invalidation_pos)) From 6503bef5f09f567dab4ab7eec04c2e3df339a705 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 9 Dec 2024 17:20:53 +0200 Subject: [PATCH 03/60] workign threads on windows --- src/asammdf/blocks/cutils.c | 205 +++++++++++++++++++++++++++++++++++ src/asammdf/blocks/mdf_v4.py | 3 +- 2 files changed, 207 insertions(+), 1 deletion(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 05efe48f4..18c02a87c 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -9,6 +9,17 @@ #include #include #include +#define MAX_THREADS 64 + +#if defined(_WIN32) + #include + #include +#else + #include +#endif + +#define MAX(a,b) ((a) > (b) ? (a) : (b)) +#define MIN(a,b) ((a) < (b) ? 
(a) : (b)) #define PY_PRINTF(o) \ PyObject_Print(o, stdout, 0); \ @@ -1312,6 +1323,8 @@ static PyObject *get_channel_raw_bytes(PyObject *self, PyObject *args) } } +int MAX_THR = 8; + static PyObject *get_invalidation_bits_array(PyObject *self, PyObject *args) { Py_ssize_t count, size, actual_byte_count, delta, invalidation_pos, invalidation_size; @@ -1356,6 +1369,196 @@ static PyObject *get_invalidation_bits_array(PyObject *self, PyObject *args) } } +typedef struct MyData { + uint8_t * inptr; + Py_ssize_t size; + Py_ssize_t record_size; + Py_ssize_t byte_offset; + Py_ssize_t byte_count; + uint8_t * outptr; + Py_ssize_t out_size; +} MYDATA, *PMYDATA; + +typedef struct ChannelInfo { + PMYDATA data; + Py_ssize_t count; + Py_ssize_t idx; +} MyChannelInfo, *PMyChannelInfo; + + +void * get_channel_raw_bytes_C(void *lpParam ) +{ + Py_ssize_t count, actual_byte_count, delta; + PMYDATA data; + PMyChannelInfo indata; + indata = (PMyChannelInfo) lpParam; + + Py_ssize_t signal_count, thread_idx; + signal_count = indata->count; + thread_idx = indata->idx; + data = indata->data; + for (Py_ssize_t i = 0; irecord_size) + { + data->outptr = NULL; + data->out_size = 0; + } + else if (data->record_size < data->byte_offset + data->byte_count) + { + inptr = data->inptr; + delta = data->byte_offset + data->byte_count - data->record_size; + actual_byte_count = data->record_size - data->byte_offset; + + count = data->size / data->record_size; + + outptr = (uint8_t *) malloc(count * data->byte_count); + data->outptr = outptr; + data->out_size = count * data->byte_count; + + inptr += data->byte_offset; + + for (Py_ssize_t i = 0; i < count; i++) + { + for (Py_ssize_t j = 0; j < actual_byte_count; j++) + *outptr++ = *inptr++; + + inptr += data->record_size - actual_byte_count; + for (Py_ssize_t j = 0; j < delta; j++) + { + *outptr++ = 0; + } + } + } + else + { + inptr = data->inptr; + count = data->size / data->record_size; + outptr = (uint8_t *) malloc(count * data->byte_count); + 
data->outptr = outptr; + data->out_size = count * data->byte_count; + + inptr += data->byte_offset; + + delta = data->record_size - data->byte_count; + + for (Py_ssize_t i = 0; i < count; i++) + { + for (Py_ssize_t j = 0; j < data->byte_count; j++) + *outptr++ = *inptr++; + inptr += delta; + } + } + + for (Py_ssize_t i = 0; i 1) { + + Py_BEGIN_ALLOW_THREADS + + for (int i=0; i< max_th; i++) { + hThreads[i] = CreateThread( + NULL, + 0, + get_channel_raw_bytes_C, + &ch_info[i], + 0, + &dwThreadIdArray[i] + ); + } + + WaitForMultipleObjects(max_th, hThreads, true, INFINITE); + for (int i=0; i< max_th; i++) { + CloseHandle(hThreads[i]); + } + + Py_END_ALLOW_THREADS + + } + else { + get_channel_raw_bytes_C(&ch_info[0]); + } + + out = PyList_New(signal_count); + for (int i=0; i 5: - print(f'{ss / 1024/1024 / vv:.3f} MB/s {cc=}') + print(f'{ss / 1024/1024 / vv:.3f} MB/s {cc=}') cc = 0 ss = 0 tt = perf_counter() From 8161a8090ef85f9ede77527b107a5117e53acf9c Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 10 Dec 2024 08:35:06 +0200 Subject: [PATCH 04/60] get parallel --- src/asammdf/blocks/cutils.c | 27 ++++++++--------------- src/asammdf/blocks/mdf_v4.py | 42 ++++++++++++++++++++++++++++++------ 2 files changed, 45 insertions(+), 24 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 18c02a87c..53c89c9de 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1323,7 +1323,6 @@ static PyObject *get_channel_raw_bytes(PyObject *self, PyObject *args) } } -int MAX_THR = 8; static PyObject *get_invalidation_bits_array(PyObject *self, PyObject *args) { @@ -1401,7 +1400,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) uint8_t *outptr, *inptr; - for (int idx = thread_idx; idx < signal_count; idx += MAX_THR) { + for (int idx = thread_idx; idx < signal_count; idx += MAX_THREADS) { if (!data->record_size) { data->outptr = NULL; @@ -1453,7 +1452,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) } } - for 
(Py_ssize_t i = 0; i 1) { - Py_BEGIN_ALLOW_THREADS - for (int i=0; i< max_th; i++) { + for (int i=0; i< MAX_THREADS; i++) { hThreads[i] = CreateThread( NULL, 0, @@ -1527,18 +1523,13 @@ static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) ); } - WaitForMultipleObjects(max_th, hThreads, true, INFINITE); - for (int i=0; i< max_th; i++) { + WaitForMultipleObjects(MAX_THREADS, hThreads, true, INFINITE); + for (int i=0; i< MAX_THREADS; i++) { CloseHandle(hThreads[i]); } Py_END_ALLOW_THREADS - } - else { - get_channel_raw_bytes_C(&ch_info[0]); - } - out = PyList_New(signal_count); for (int i=0; i grp.channel_group.samples_byte_nr: + ch_info.append([0, 0]) + elif dependency_list: + ch_info.append([0, 0]) + else: + if info is not None: + _, byte_size, byte_offset, _ = info + ch_info.append([byte_offset, byte_size]) + else: + ch_info.append([0, 0]) + while True: try: fragments = [next(stream) for stream in data_streams] @@ -8319,19 +8345,23 @@ def _yield_selected_signals( else: signals = [(_master, None)] - vlsd_max_sizes = [] - for fragment, (group_index, channels) in zip(fragments, groups.items()): grp = self.groups[group_index] if not grp.single_channel_dtype: self._prepare_record(grp) + channels_raw_data = get_channel_raw_bytes_parallel( + fragment[0], + grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, + group_info[group_index] + ) + if idx == 0: - for channel_index in channels: + for channel_index, raw_data in zip(channels, channels_raw_data): signal = self.get( group=group_index, index=channel_index, - data=fragment, + data=(raw_data, -1, -1, None) if raw_data else fragment, raw=True, ignore_invalidation_bits=True, samples_only=False, @@ -8340,11 +8370,11 @@ def _yield_selected_signals( signals.append(signal) else: - for channel_index in channels: + for channel_index, raw_data in zip(channels, channels_raw_data): signal, invalidation_bits = self.get( group=group_index, index=channel_index, - data=fragment, + 
data=(raw_data, -1, -1, None) if raw_data else fragment, raw=True, ignore_invalidation_bits=True, samples_only=True, From fa88aa5e1dd639a40de7d2bb31e4aea00cf43058 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 10 Dec 2024 09:19:34 +0200 Subject: [PATCH 05/60] print --- src/asammdf/blocks/mdf_v4.py | 42 ++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index c6d7db1d7..9dafcb0ee 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -1697,8 +1697,8 @@ def _load_data( invalidation_data.append(new_invalidation_data) cur_invalidation_size += inv_size - if vv := (perf_counter() - tt) > 5: - print(f'{ss / 1024/1024 / vv:.3f} MB/s {cc=}') + if (vv := (perf_counter() - tt)) > 5: + print(f'{ss / 1024/1024 / vv:.6f} MB/s {cc=} {vv=}') cc = 0 ss = 0 tt = perf_counter() @@ -7605,7 +7605,7 @@ def _get_scalar( if one_piece: fragment = data - data_bytes = fragment[0] + data_bytes, rec_offset, rec_count, _ = fragment info = grp.record[ch_nr] @@ -7614,12 +7614,15 @@ def _get_scalar( if ch_nr == 0 and len(grp.channels) == 1 and channel.dtype_fmt.itemsize == record_size: buffer = bytearray(data_bytes) else: - buffer = get_channel_raw_bytes( - data_bytes, - record_size + channel_group.invalidation_bytes_nr, - byte_offset, - byte_size, - ) + if (rec_offset, rec_count) != (-2, -2): + buffer = get_channel_raw_bytes( + data_bytes, + record_size + channel_group.invalidation_bytes_nr, + byte_offset, + byte_size, + ) + else: + buffer = data_bytes vals = frombuffer(buffer, dtype=dtype_) @@ -8319,11 +8322,11 @@ def _yield_selected_signals( if channel.byte_offset + ( channel.bit_offset + channel.bit_count) / 8 > grp.channel_group.samples_byte_nr: ch_info.append([0, 0]) - elif dependency_list: + elif dependency_list[channel_index]: ch_info.append([0, 0]) else: - if info is not None: - _, byte_size, byte_offset, _ = info + if info[channel_index] is not 
None: + _, byte_size, byte_offset, _ = info[channel_index] ch_info.append([byte_offset, byte_size]) else: ch_info.append([0, 0]) @@ -8337,6 +8340,7 @@ def _yield_selected_signals( if perf_counter() - tt > 90: 1/0 + # prepare the master _master = self.get_master(index, data=fragments[master_index], one_piece=True) self._set_temporary_master(_master) @@ -8350,6 +8354,16 @@ def _yield_selected_signals( if not grp.single_channel_dtype: self._prepare_record(grp) + # print(f'Size = {len(fragment[0]) / 1024 / 1024:.3f} MB') + + # prepare the invalidation bytes for this group and fragment + invalidation_bytes = get_channel_raw_bytes( + fragment[0], + grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, + grp.channel_group.samples_byte_nr, + grp.channel_group.invalidation_bytes_nr, + ) + channels_raw_data = get_channel_raw_bytes_parallel( fragment[0], grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, @@ -8361,7 +8375,7 @@ def _yield_selected_signals( signal = self.get( group=group_index, index=channel_index, - data=(raw_data, -1, -1, None) if raw_data else fragment, + data=(raw_data, -2, -2, invalidation_bytes) if raw_data else fragment, raw=True, ignore_invalidation_bits=True, samples_only=False, @@ -8374,7 +8388,7 @@ def _yield_selected_signals( signal, invalidation_bits = self.get( group=group_index, index=channel_index, - data=(raw_data, -1, -1, None) if raw_data else fragment, + data=(raw_data, -2, -2, invalidation_bytes) if raw_data else fragment, raw=True, ignore_invalidation_bits=True, samples_only=True, From e05f8c7286b2e84c846f2bcaa4466795848f3e66 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 10 Dec 2024 10:42:39 +0200 Subject: [PATCH 06/60] linux threads --- src/asammdf/blocks/cutils.c | 21 ++++++-- src/asammdf/blocks/mdf_v4.py | 97 +++++++++++++++++++++++------------- 2 files changed, 78 insertions(+), 40 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 
53c89c9de..1525af0de 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -9,7 +9,7 @@ #include #include #include -#define MAX_THREADS 64 +#define MAX_THREADS 4 #if defined(_WIN32) #include @@ -1458,7 +1458,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) return 0; } -#ifdef _WIN32 + static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) { Py_ssize_t count, size, actual_byte_count, delta; @@ -1468,8 +1468,13 @@ static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) Py_ssize_t signal_count, thread_count, remaining_signals, thread_pos; uint8_t *inptr, *outptr; + +#ifdef _WIN32 HANDLE hThreads[MAX_THREADS] = { NULL }; DWORD dwThreadIdArray[MAX_THREADS]; +#else + pthread_t dwThreadIdArray[MAX_THREADS]; +#endif PMYDATA pDataArray; PMyChannelInfo ch_info; @@ -1511,7 +1516,8 @@ static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) } Py_BEGIN_ALLOW_THREADS - + +#ifdef _WIN32 for (int i=0; i< MAX_THREADS; i++) { hThreads[i] = CreateThread( NULL, @@ -1527,6 +1533,14 @@ static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) for (int i=0; i< MAX_THREADS; i++) { CloseHandle(hThreads[i]); } +#else + for (int i=0; i< MAX_THREADS; i++) { + pthread_create(&(dwThreadIdArray[i]), NULL, get_channel_raw_bytes_C, &ch_info[i]); + } + for (int i=0; i< MAX_THREADS; i++) { + pthread_join(dwThreadIdArray[i], NULL); + } +#endif Py_END_ALLOW_THREADS @@ -1547,7 +1561,6 @@ static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) return out; } } -#endif struct dtype diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 9dafcb0ee..baff6edfb 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -8336,9 +8336,6 @@ def _yield_selected_signals( fragments = [next(stream) for stream in data_streams] except: break - - if perf_counter() - tt > 90: - 1/0 # prepare the master _master = self.get_master(index, 
data=fragments[master_index], one_piece=True) @@ -8356,45 +8353,73 @@ def _yield_selected_signals( # print(f'Size = {len(fragment[0]) / 1024 / 1024:.3f} MB') - # prepare the invalidation bytes for this group and fragment - invalidation_bytes = get_channel_raw_bytes( - fragment[0], - grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, - grp.channel_group.samples_byte_nr, - grp.channel_group.invalidation_bytes_nr, - ) + if len(channels) >= 100: + # prepare the invalidation bytes for this group and fragment + invalidation_bytes = get_channel_raw_bytes( + fragment[0], + grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, + grp.channel_group.samples_byte_nr, + grp.channel_group.invalidation_bytes_nr, + ) - channels_raw_data = get_channel_raw_bytes_parallel( - fragment[0], - grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, - group_info[group_index] - ) + channels_raw_data = get_channel_raw_bytes_parallel( + fragment[0], + grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, + group_info[group_index] + ) - if idx == 0: - for channel_index, raw_data in zip(channels, channels_raw_data): - signal = self.get( - group=group_index, - index=channel_index, - data=(raw_data, -2, -2, invalidation_bytes) if raw_data else fragment, - raw=True, - ignore_invalidation_bits=True, - samples_only=False, - ) + if idx == 0: + for channel_index, raw_data in zip(channels, channels_raw_data): + signal = self.get( + group=group_index, + index=channel_index, + data=(raw_data, -2, -2, invalidation_bytes) if raw_data else fragment, + raw=True, + ignore_invalidation_bits=True, + samples_only=False, + ) - signals.append(signal) + signals.append(signal) + else: + for channel_index, raw_data in zip(channels, channels_raw_data): + signal, invalidation_bits = self.get( + group=group_index, + index=channel_index, + data=(raw_data, -2, -2, invalidation_bytes) if raw_data else fragment, + raw=True, + 
ignore_invalidation_bits=True, + samples_only=True, + ) + + signals.append((signal, invalidation_bits)) else: - for channel_index, raw_data in zip(channels, channels_raw_data): - signal, invalidation_bits = self.get( - group=group_index, - index=channel_index, - data=(raw_data, -2, -2, invalidation_bytes) if raw_data else fragment, - raw=True, - ignore_invalidation_bits=True, - samples_only=True, - ) - signals.append((signal, invalidation_bits)) + if idx == 0: + for channel_index in channels: + signal = self.get( + group=group_index, + index=channel_index, + data=fragment, + raw=True, + ignore_invalidation_bits=True, + samples_only=False, + ) + + signals.append(signal) + + else: + for channel_index in channels: + signal, invalidation_bits = self.get( + group=group_index, + index=channel_index, + data=fragment, + raw=True, + ignore_invalidation_bits=True, + samples_only=True, + ) + + signals.append((signal, invalidation_bits)) if version < "4.00": if idx == 0: From efbd08fc939b4d6c6981679588890bb975401999 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 10 Dec 2024 12:34:09 +0200 Subject: [PATCH 07/60] style --- src/asammdf/blocks/mdf_v4.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index baff6edfb..4a98a4575 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -175,10 +175,10 @@ data_block_from_arrays, extract, get_channel_raw_bytes, + get_channel_raw_bytes_parallel, + get_invalidation_bits_array, get_vlsd_max_sample_size, sort_data_block, - get_channel_raw_bytes_parallel, -get_invalidation_bits_array, ) @@ -1384,6 +1384,7 @@ def _load_data( """get group's data block bytes""" from time import perf_counter + cc = 0 offset = 0 @@ -1698,7 +1699,7 @@ def _load_data( cur_invalidation_size += inv_size if (vv := (perf_counter() - tt)) > 5: - print(f'{ss / 1024/1024 / vv:.6f} MB/s {cc=} {vv=}') + print(f"{ss / 1024/1024 / 
vv:.6f} MB/s {cc=} {vv=}") cc = 0 ss = 0 tt = perf_counter() @@ -2642,11 +2643,7 @@ def get_invalidation_bits( self._invalidation_cache[(group_index, offset, _count)] = invalidation_bytes - invalidation_bits = get_invalidation_bits_array( - invalidation_bytes, - invalidation_bytes_nr, - ch_invalidation_pos - ) + invalidation_bits = get_invalidation_bits_array(invalidation_bytes, invalidation_bytes_nr, ch_invalidation_pos) return InvalidationArray(invalidation_bits, (group_index, ch_invalidation_pos)) @@ -7614,7 +7611,7 @@ def _get_scalar( if ch_nr == 0 and len(grp.channels) == 1 and channel.dtype_fmt.itemsize == record_size: buffer = bytearray(data_bytes) else: - if (rec_offset, rec_count) != (-2, -2): + if (rec_offset, rec_count) != (-2, -2): buffer = get_channel_raw_bytes( data_bytes, record_size + channel_group.invalidation_bytes_nr, @@ -8270,9 +8267,9 @@ def _yield_selected_signals( version = version or self.version virtual_channel_group = self.virtual_groups[index] record_size = virtual_channel_group.record_size - + from time import perf_counter - + tt = perf_counter() if groups is None: @@ -8319,8 +8316,10 @@ def _yield_selected_signals( for channel_index in channels: channel = grp.channels[channel_index] - if channel.byte_offset + ( - channel.bit_offset + channel.bit_count) / 8 > grp.channel_group.samples_byte_nr: + if ( + channel.byte_offset + (channel.bit_offset + channel.bit_count) / 8 + > grp.channel_group.samples_byte_nr + ): ch_info.append([0, 0]) elif dependency_list[channel_index]: ch_info.append([0, 0]) @@ -8365,7 +8364,7 @@ def _yield_selected_signals( channels_raw_data = get_channel_raw_bytes_parallel( fragment[0], grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, - group_info[group_index] + group_info[group_index], ) if idx == 0: @@ -9586,7 +9585,7 @@ def save( dl_block = DataList(**kwargs) for i, data__ in enumerate(data): - + data_ = data__[0] if compression and self.version >= "4.10": From 
8581d755684d197199d950c3c404b71c014fd244 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 10 Dec 2024 20:38:54 +0200 Subject: [PATCH 08/60] remove code duplication in the extend method --- src/asammdf/blocks/mdf_v4.py | 52 ++++++++---------------------------- 1 file changed, 11 insertions(+), 41 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 4a98a4575..4915fd999 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -5870,8 +5870,14 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No invalidation_bytes_nr = gp.channel_group.invalidation_bytes_nr for i, ((signal, invalidation_bits), sig_type) in enumerate(zip(signals, gp.signal_types)): - if invalidation_bits is not None and not isinstance(invalidation_bits, InvalidationArray): - invalidation_bits = InvalidationArray(invalidation_bits) + if invalidation_bytes_nr: + if invalidation_bits is not None: + if not isinstance(invalidation_bits, InvalidationArray): + invalidation_bits = InvalidationArray(invalidation_bits) + if (origin := invalidation_bits.origin) == InvalidationArray.ORIGIN_UNKNOWN: + inval_bits[origin].append(invalidation_bits) + else: + inval_bits[origin] = invalidation_bits # first add the signals in the simple signal list if sig_type == v4c.SIGNAL_TYPE_SCALAR: @@ -5883,12 +5889,6 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No fields.append((signal, byte_size)) - if invalidation_bytes_nr and invalidation_bits is not None: - if (origin := invalidation_bits.origin) == InvalidationArray.ORIGIN_UNKNOWN: - inval_bits[origin].append(invalidation_bits) - else: - inval_bits[origin] = invalidation_bits - elif sig_type == v4c.SIGNAL_TYPE_CANOPEN: names = signal.dtype.names @@ -5906,18 +5906,7 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No fields.append((vals, 7)) - if invalidation_bytes_nr and invalidation_bits is not None: - if 
(origin := invalidation_bits.origin) == InvalidationArray.ORIGIN_UNKNOWN: - inval_bits[origin].append(invalidation_bits) - else: - inval_bits[origin] = invalidation_bits - elif sig_type == v4c.SIGNAL_TYPE_STRUCTURE_COMPOSITION: - if invalidation_bytes_nr and invalidation_bits is not None: - if (origin := invalidation_bits.origin) == InvalidationArray.ORIGIN_UNKNOWN: - inval_bits[origin].append(invalidation_bits) - else: - inval_bits[origin] = invalidation_bits if not signal.flags["C_CONTIGUOUS"]: signal = np.ascontiguousarray(signal) @@ -5940,12 +5929,6 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No fields.append((samples, size)) - if invalidation_bytes_nr and invalidation_bits is not None: - if (origin := invalidation_bits.origin) == InvalidationArray.ORIGIN_UNKNOWN: - inval_bits[origin].append(invalidation_bits) - else: - inval_bits[origin] = invalidation_bits - for name in names[1:]: samples = signal[name] shape = samples.shape[1:] @@ -5959,12 +5942,6 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No fields.append((samples, size)) - if invalidation_bytes_nr and invalidation_bits is not None: - if (origin := invalidation_bits.origin) == InvalidationArray.ORIGIN_UNKNOWN: - inval_bits[origin].append(invalidation_bits) - else: - inval_bits[origin] = invalidation_bits - else: if self.compact_vlsd: cur_offset = sum(blk.original_size for blk in gp.get_signal_data_blocks(i)) @@ -6038,19 +6015,9 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No offsets = np.ascontiguousarray(offsets) fields.append((offsets, 8)) - if invalidation_bytes_nr and invalidation_bits is not None: - if (origin := invalidation_bits.origin) == InvalidationArray.ORIGIN_UNKNOWN: - inval_bits[origin].append(invalidation_bits) - else: - inval_bits[origin] = invalidation_bits - if invalidation_bytes_nr: unknown_origin = inval_bits.pop(InvalidationArray.ORIGIN_UNKNOWN) - _pos_map = {key: idx for idx, 
key in enumerate(inval_bits)} - - _unknown_pos_map = deque(list(range(len(inval_bits), len(inval_bits) + len(unknown_origin)))) - inval_bits = list(inval_bits.values()) + unknown_origin cycles_nr = len(inval_bits[0]) invalidation_bytes_nr = len(inval_bits) @@ -8336,6 +8303,9 @@ def _yield_selected_signals( except: break + if perf_counter() - tt > 120: + 1/0 + # prepare the master _master = self.get_master(index, data=fragments[master_index], one_piece=True) self._set_temporary_master(_master) From 00f2ab5ffb8e93256c620f33d5a1ff0c67685d46 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Wed, 11 Dec 2024 10:38:59 +0200 Subject: [PATCH 09/60] parallel data_block_from_arrays --- src/asammdf/blocks/cutils.c | 143 +++++++++++++++++++++++++++-------- src/asammdf/blocks/mdf_v4.py | 51 +++++++------ src/asammdf/signal.py | 52 +++++++++---- 3 files changed, 173 insertions(+), 73 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 3f058bcd0..d478686c1 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1569,18 +1569,70 @@ struct dtype int64_t itemsize; }; -static PyObject *data_block_from_arrays(PyObject *self, PyObject *args) + +static PyObject *data_block_from_arrays_C(void *lpParam ) { Py_ssize_t size; PyObject *data_blocks, *out = NULL, *item, *array, *copy_array, *itemsize, *cycles_obj; + char *read_pos = NULL, *write_pos = NULL; + Py_ssize_t total_size = 0, record_size = 0, + cycles, byte_count = 0, step; + Py_ssize_t isize = 0, offset = 0; + + PMYDATA data; + PMyChannelInfo indata; + indata = (PMyChannelInfo) lpParam; + + Py_ssize_t signal_count, thread_idx; + signal_count = indata->count; + thread_idx = indata->idx; + data = indata->data; + for (Py_ssize_t i = 0; irecord_size; + step = record_size - data->byte_count; + cycles = data->size; + byte_count = data->byte_count; + inptr = data->inptr; + + if (!record_size) continue; + + outptr = data->outptr + data->byte_offset; + + for (Py_ssize_t i=0; i 120: 
- 1/0 + x = 1 / 0 # prepare the master _master = self.get_master(index, data=fragments[master_index], one_piece=True) @@ -8322,7 +8325,7 @@ def _yield_selected_signals( # print(f'Size = {len(fragment[0]) / 1024 / 1024:.3f} MB') - if len(channels) >= 100: + if 1 and len(channels) >= 100: # prepare the invalidation bytes for this group and fragment invalidation_bytes = get_channel_raw_bytes( fragment[0], diff --git a/src/asammdf/signal.py b/src/asammdf/signal.py index 022277224..3e6d5e479 100644 --- a/src/asammdf/signal.py +++ b/src/asammdf/signal.py @@ -575,9 +575,12 @@ def cut( samples = np.append(self.samples[:stop], interpolated.samples, axis=0) timestamps = np.append(self.timestamps[:stop], interpolated.timestamps) if self.invalidation_bits is not None: - invalidation_bits = np.append( - self.invalidation_bits[:stop], - interpolated.invalidation_bits, + invalidation_bits = InvalidationArray( + np.append( + self.invalidation_bits[:stop], + interpolated.invalidation_bits, + ), + self.invalidation_bits.origin, ) else: invalidation_bits = None @@ -650,9 +653,12 @@ def cut( samples = np.append(interpolated.samples, self.samples[start:], axis=0) timestamps = np.append(interpolated.timestamps, self.timestamps[start:]) if self.invalidation_bits is not None: - invalidation_bits = np.append( - interpolated.invalidation_bits, - self.invalidation_bits[start:], + invalidation_bits = InvalidationArray( + np.append( + interpolated.invalidation_bits, + self.invalidation_bits[start:], + ), + self.invalidation_bits.origin, ) else: invalidation_bits = None @@ -738,7 +744,7 @@ def cut( samples = np.array([], dtype=self.samples.dtype) timestamps = np.array([], dtype=self.timestamps.dtype) if self.invalidation_bits is not None: - invalidation_bits = np.array([], dtype=bool) + invalidation_bits = self.invalidation_bits[0:0] else: invalidation_bits = None else: @@ -764,9 +770,12 @@ def cut( samples = np.append(samples, interpolated.samples, axis=0) timestamps = np.append(timestamps, 
interpolated.timestamps) if invalidation_bits is not None: - invalidation_bits = np.append( - invalidation_bits, - interpolated.invalidation_bits, + invalidation_bits = InvalidationArray( + np.append( + invalidation_bits, + interpolated.invalidation_bits, + ), + interpolated.invalidation_bits.origin, ) if ( @@ -785,9 +794,12 @@ def cut( timestamps = np.append(interpolated.timestamps, timestamps) if invalidation_bits is not None: - invalidation_bits = np.append( - interpolated.invalidation_bits, - invalidation_bits, + invalidation_bits = InvalidationArray( + np.append( + interpolated.invalidation_bits, + invalidation_bits, + ), + interpolated.invalidation_bits.origin, ) if samples.dtype != self.samples.dtype: @@ -844,11 +856,19 @@ def extend(self, other: Signal) -> Signal: if self.invalidation_bits is None and other.invalidation_bits is None: invalidation_bits = None elif self.invalidation_bits is None and other.invalidation_bits is not None: - invalidation_bits = np.concatenate((np.zeros(len(self), dtype=bool), other.invalidation_bits)) + invalidation_bits = InvalidationArray( + np.concatenate((np.zeros(len(self), dtype=bool), other.invalidation_bits)), + other.invalidation_bits.origin, + ) elif self.invalidation_bits is not None and other.invalidation_bits is None: - invalidation_bits = np.concatenate((self.invalidation_bits, np.zeros(len(other), dtype=bool))) + invalidation_bits = InvalidationArray( + np.concatenate((self.invalidation_bits, np.zeros(len(other), dtype=bool))), + self.invalidation_bits.origin, + ) else: - invalidation_bits = np.append(self.invalidation_bits, other.invalidation_bits) + invalidation_bits = InvalidationArray( + np.append(self.invalidation_bits, other.invalidation_bits), self.invalidation_bits.origin + ) result = Signal( np.append(self.samples, other.samples, axis=0), From e4521dbfbccdfa4f7b596de5327998c58ae95cb5 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Wed, 11 Dec 2024 12:13:59 +0200 Subject: [PATCH 10/60] use store_size to 
optimize the decompression --- src/asammdf/blocks/mdf_v4.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index f09326e23..07d5d2b8e 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -3793,7 +3793,7 @@ def append( for i in range(count): data_ = samples[i * block_size : (i + 1) * block_size] raw_size = len(data_) - data_ = lz_compress(data_) + data_ = lz_compress(data_, store_size=True) size = len(data_) data_address = self._tempfile.tell() @@ -3816,7 +3816,7 @@ def append( data = samples raw_size = len(data) - data = lz_compress(data) + data = lz_compress(data, store_size=True) size = len(data) self._tempfile.write(data) @@ -3835,7 +3835,7 @@ def append( addr = tell() data = inval_bits.tobytes() raw_size = len(data) - data = lz_compress(data) + data = lz_compress(data, store_size=True) size = len(data) self._tempfile.write(data) @@ -4634,7 +4634,7 @@ def _append_column_oriented( data = samples.tobytes() raw_size = len(data) - data = lz_compress(data) + data = lz_compress(data, store_size=True) size = len(data) write(data) @@ -4653,7 +4653,7 @@ def _append_column_oriented( addr = tell() data = invalidation_bits.tobytes() raw_size = len(data) - data = lz_compress(data) + data = lz_compress(data, store_size=True) size = len(data) write(data) @@ -6049,7 +6049,7 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No if self.version < "4.20": data = samples raw_size = size - data = lz_compress(data) + data = lz_compress(data, store_size=True) size = len(data) stream.write(data) gp.data_blocks.append( @@ -6068,7 +6068,7 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No else: data = samples raw_size = size - data = lz_compress(data) + data = lz_compress(data, store_size=True) size = len(data) stream.write(data) @@ -6090,7 +6090,7 @@ def extend(self, index: int, signals: 
list[tuple[NDArray[Any], NDArray[Any] | No data = inval_bits.tobytes() raw_size = len(data) - data = lz_compress(data) + data = lz_compress(data, store_size=True) size = len(data) stream.write(data) @@ -6212,7 +6212,7 @@ def _extend_column_oriented(self, index: int, signals: list[tuple[NDArray[Any], if added_cycles: data = samples.tobytes() raw_size = len(data) - data = lz_compress(data) + data = lz_compress(data, store_size=True) size = len(data) write(data) @@ -6233,7 +6233,7 @@ def _extend_column_oriented(self, index: int, signals: list[tuple[NDArray[Any], addr = tell() data = invalidation_bits.tobytes() raw_size = len(data) - data = lz_compress(data) + data = lz_compress(data, store_size=True) size = len(data) write(data) @@ -10480,7 +10480,7 @@ def _sort( original_size = len(new_data) if original_size: if compress: - new_data = lz_compress(new_data) + new_data = lz_compress(new_data, store_size=True) compressed_size = len(new_data) write(new_data) @@ -10562,7 +10562,7 @@ def _sort( original_size = len(new_data) if original_size: if compress: - new_data = lz_compress(new_data) + new_data = lz_compress(new_data, store_size=True) compressed_size = len(new_data) write(new_data) From bf23b48662f60327abb8fcd56a9c176eb91142ae Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Wed, 11 Dec 2024 16:08:16 +0200 Subject: [PATCH 11/60] small fixes --- src/asammdf/blocks/mdf_v4.py | 53 ++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 07d5d2b8e..967f7a3c5 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -3784,9 +3784,11 @@ def append( del fields + record_size = offset + invalidation_bytes_nr + if size: if self.version < "4.20": - block_size = self._write_fragment_size or 20 * 1024 * 1024 + block_size = 32 * 1024 * 1024 // record_size * record_size count = ceil(size / block_size) @@ -3810,7 +3812,6 @@ def append( ) else: - 
data_address = self._tempfile.tell() gp.uses_ld = True data_address = tell() @@ -6039,43 +6040,55 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No samples = data_block_from_arrays(fields, added_cycles) size = len(samples) + samples = memoryview(samples) del fields stream.seek(0, 2) addr = stream.tell() + record_size = gp.channel_group.samples_byte_nr + gp.channel_group.invalidation_bytes_nr + if size: if self.version < "4.20": - data = samples - raw_size = size - data = lz_compress(data, store_size=True) - size = len(data) - stream.write(data) - gp.data_blocks.append( - DataBlockInfo( - address=addr, - block_type=v4c.DZ_BLOCK_LZ, - original_size=raw_size, - compressed_size=size, - param=0, + block_size = 32 * 1024 * 1024 // record_size * record_size + + count = ceil(size / block_size) + + print(f"{size / 1024 / 1024: .3f} MB -> {count=} x {block_size / 1024 / 1024: .3f} MB") + + for i in range(count): + data_ = samples[i * block_size : (i + 1) * block_size] + raw_size = len(data_) + data_ = lz_compress(data_, store_size=True) + + size = len(data_) + data_address = self._tempfile.tell() + self._tempfile.write(data_) + + gp.data_blocks.append( + DataBlockInfo( + address=data_address, + block_type=v4c.DZ_BLOCK_LZ, + original_size=raw_size, + compressed_size=size, + param=0, + ) ) - ) gp.channel_group.cycles_nr += added_cycles self.virtual_groups[index].cycles_nr += added_cycles else: - data = samples raw_size = size - data = lz_compress(data, store_size=True) + data = lz_compress(samples, store_size=True) size = len(data) stream.write(data) gp.data_blocks.append( DataBlockInfo( address=addr, - block_type=v4c.DT_BLOCK_LZ, + block_type=v4c.DZ_BLOCK_LZ, original_size=raw_size, compressed_size=size, param=0, @@ -6097,10 +6110,10 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No gp.data_blocks[-1].invalidation_block( InvalidationBlockInfo( address=addr, - block_type=v4c.DT_BLOCK_LZ, + 
block_type=v4c.DZ_BLOCK_LZ, original_size=raw_size, compressed_size=size, - param=None, + param=0, ) ) From 3154690a9f341221085bed5e401722844f5581fa Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Wed, 11 Dec 2024 21:00:51 +0200 Subject: [PATCH 12/60] Timer class --- src/asammdf/blocks/cutils.c | 2 +- src/asammdf/blocks/mdf_v4.py | 4 ---- src/asammdf/blocks/utils.py | 26 ++++++++++++++++++++++++++ 3 files changed, 27 insertions(+), 5 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index d478686c1..09a800bd4 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1323,7 +1323,6 @@ static PyObject *get_channel_raw_bytes(PyObject *self, PyObject *args) } } - static PyObject *get_invalidation_bits_array(PyObject *self, PyObject *args) { Py_ssize_t count, size, actual_byte_count, delta, invalidation_pos, invalidation_size; @@ -1368,6 +1367,7 @@ static PyObject *get_invalidation_bits_array(PyObject *self, PyObject *args) } } + typedef struct MyData { uint8_t * inptr; Py_ssize_t size; diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 967f7a3c5..0bb60e905 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -6055,8 +6055,6 @@ def extend(self, index: int, signals: list[tuple[NDArray[Any], NDArray[Any] | No count = ceil(size / block_size) - print(f"{size / 1024 / 1024: .3f} MB -> {count=} x {block_size / 1024 / 1024: .3f} MB") - for i in range(count): data_ = samples[i * block_size : (i + 1) * block_size] raw_size = len(data_) @@ -8336,8 +8334,6 @@ def _yield_selected_signals( if not grp.single_channel_dtype: self._prepare_record(grp) - # print(f'Size = {len(fragment[0]) / 1024 / 1024:.3f} MB') - if 1 and len(channels) >= 100: # prepare the invalidation bytes for this group and fragment invalidation_bytes = get_channel_raw_bytes( diff --git a/src/asammdf/blocks/utils.py b/src/asammdf/blocks/utils.py index f21eae71f..95065dec6 100644 --- 
a/src/asammdf/blocks/utils.py +++ b/src/asammdf/blocks/utils.py @@ -2413,3 +2413,29 @@ def timed(*args, **kwargs): return ret return timed + + +class Timer: + + def __init__(self, name=""): + self.name = name or str(id(self)) + self.count = 0 + self.total_time = 0.0 + + def __enter__(self): + now = perf_counter() + self.start = now + return self + + def __exit__(self, type, value, traceback): + now = perf_counter() + self.total_time += now - self.start + self.count += 1 + + def display(self): + print( + f"""TIMER {self.name}: +\t* {self.count} iterations in {self.total_time * 1000:.3f}ms +\t* {self.count / self.total_time:.3f} iter/s +\t* {self.total_time / self.count * 1000:.3f} ms/iter""" + ) From c91348a16a84b338f7cf2e69e97e9b7558a3536f Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Wed, 11 Dec 2024 22:12:59 +0200 Subject: [PATCH 13/60] truely parallel get raw bytes --- src/asammdf/blocks/cutils.c | 45 ++++++++++++++++--------------------- src/asammdf/blocks/utils.py | 11 +++++++-- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 09a800bd4..1d7e7e3f8 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1401,12 +1401,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) uint8_t *outptr, *inptr; for (int idx = thread_idx; idx < signal_count; idx += MAX_THREADS) { - if (!data->record_size) - { - data->outptr = NULL; - data->out_size = 0; - } - else if (data->record_size < data->byte_offset + data->byte_count) + if (data->record_size < data->byte_offset + data->byte_count) { inptr = data->inptr; delta = data->byte_offset + data->byte_count - data->record_size; @@ -1414,10 +1409,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) count = data->size / data->record_size; - outptr = (uint8_t *) malloc(count * data->byte_count); - data->outptr = outptr; - data->out_size = count * data->byte_count; - + outptr = data->outptr; inptr += data->byte_offset; for (Py_ssize_t 
i = 0; i < count; i++) @@ -1436,10 +1428,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) { inptr = data->inptr; count = data->size / data->record_size; - outptr = (uint8_t *) malloc(count * data->byte_count); - data->outptr = outptr; - data->out_size = count * data->byte_count; - + outptr = data->outptr; inptr += data->byte_offset; delta = data->record_size - data->byte_count; @@ -1461,7 +1450,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) { - Py_ssize_t count, size, actual_byte_count, delta; + Py_ssize_t count, size, actual_byte_count, delta, cycles; PyObject *data_block, *out, *signals, *obj; Py_ssize_t record_size, byte_offset, byte_count; @@ -1485,6 +1474,7 @@ static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) } else { + if (PyBytes_Check(data_block)) { size = PyBytes_Size(data_block); inptr = PyBytes_AsString(data_block); @@ -1494,6 +1484,8 @@ static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) inptr = PyByteArray_AsString(data_block); } + cycles = size / record_size; + signal_count = PyList_Size(signals); pDataArray = (PMYDATA) malloc(sizeof(MYDATA) * signal_count); ch_info = (PMyChannelInfo) malloc(sizeof(MyChannelInfo) * MAX_THREADS); @@ -1503,6 +1495,8 @@ static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) ch_info[i].idx = i; } + + out = PyList_New(signal_count); for (int i=0; i Date: Wed, 11 Dec 2024 23:04:20 +0200 Subject: [PATCH 14/60] start rework on the get_invalidation_bits --- src/asammdf/blocks/cutils.c | 15 ++++----- src/asammdf/blocks/mdf_v4.py | 62 ++++++++++++++++++++---------------- src/asammdf/blocks/utils.py | 8 ++--- 3 files changed, 44 insertions(+), 41 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 1d7e7e3f8..9c68957db 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1375,7 +1375,7 @@ typedef 
struct MyData { Py_ssize_t byte_offset; Py_ssize_t byte_count; uint8_t * outptr; - Py_ssize_t out_size; + Py_ssize_t cycles; } MYDATA, *PMYDATA; typedef struct ChannelInfo { @@ -1407,7 +1407,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) delta = data->byte_offset + data->byte_count - data->record_size; actual_byte_count = data->record_size - data->byte_offset; - count = data->size / data->record_size; + count = data->cycles; outptr = data->outptr; inptr += data->byte_offset; @@ -1427,7 +1427,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) else { inptr = data->inptr; - count = data->size / data->record_size; + count = data->cycles; outptr = data->outptr; inptr += data->byte_offset; @@ -1505,7 +1505,7 @@ static PyObject *get_channel_raw_bytes_parallel(PyObject *self, PyObject *args) pDataArray[i].record_size = record_size; pDataArray[i].byte_offset = PyLong_AsSsize_t(PyList_GetItem(obj, 0)); pDataArray[i].byte_count = PyLong_AsSsize_t(PyList_GetItem(obj, 1)); - pDataArray[i].out_size=0; + pDataArray[i].cycles=cycles; obj = PyByteArray_FromStringAndSize(NULL, pDataArray[i].byte_count * cycles); pDataArray[i].outptr= (uint8_t *) PyByteArray_AsString(obj); @@ -1588,7 +1588,7 @@ static PyObject *data_block_from_arrays_C(void *lpParam ) for (Py_ssize_t idx = thread_idx; idx < signal_count; idx += MAX_THREADS) { record_size = data->record_size; step = record_size - data->byte_count; - cycles = data->size; + cycles = data->cycles; byte_count = data->byte_count; inptr = data->inptr; @@ -1664,12 +1664,9 @@ static PyObject *data_block_from_arrays(PyObject *self, PyObject *args) itemsize = PyTuple_GetItem(item, 1); pDataArray[i].inptr = (uint8_t *)PyArray_BYTES((PyArrayObject *)array); - pDataArray[i].size = cycles; - pDataArray[i].record_size = 0; + pDataArray[i].cycles = cycles; pDataArray[i].byte_offset = total_size; pDataArray[i].byte_count = PyLong_AsSsize_t(itemsize); - pDataArray[i].outptr=NULL; - pDataArray[i].out_size=0; total_size += pDataArray[i].byte_count; 
diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 0bb60e905..31cccc7d3 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -2598,7 +2598,7 @@ def _filter_occurrences( def get_invalidation_bits( self, group_index: int, - channel: Channel, + pos_invalidation_bit: int, fragment: tuple[bytes, int, int, ReadableBufferType | None], ) -> NDArray[bool_]: """get invalidation indexes for the channel @@ -2607,8 +2607,8 @@ def get_invalidation_bits( ---------- group_index : int group index - channel : Channel - channel object + pos_invalidation_bit : int + channel invalidation bit position fragment : (bytes, int) (fragment bytes, fragment offset) @@ -2623,29 +2623,29 @@ def get_invalidation_bits( data_bytes, offset, _count, invalidation_bytes = fragment invalidation_bytes_nr = group.channel_group.invalidation_bytes_nr - ch_invalidation_pos = channel.pos_invalidation_bit if invalidation_bytes is None: - try: - invalidation_bytes = self._invalidation_cache[(group_index, offset, _count)] - except KeyError: - - record = group.record - if record is None: - self._prepare_record(group) + record = group.record + if record is None: + self._prepare_record(group) + + invalidation_bytes = get_channel_raw_bytes( + data_bytes, + group.channel_group.samples_byte_nr + group.channel_group.invalidation_bytes_nr, + group.channel_group.samples_byte_nr, + invalidation_bytes_nr, + ) - invalidation_bytes = get_channel_raw_bytes( - data_bytes, - group.channel_group.samples_byte_nr + group.channel_group.invalidation_bytes_nr, - group.channel_group.samples_byte_nr, - invalidation_bytes_nr, + key = (group_index, offset, _count, pos_invalidation_bit) + if key not in self._invalidation_cache: + for i in range(invalidation_bytes_nr * 8): + self._invalidation_cache[(group_index, offset, _count, i)] = InvalidationArray( + get_invalidation_bits_array(invalidation_bytes, invalidation_bytes_nr, pos_invalidation_bit), + (group_index, pos_invalidation_bit), 
) + invalidation_bits = self._invalidation_cache[key] - self._invalidation_cache[(group_index, offset, _count)] = invalidation_bytes - - invalidation_bits = get_invalidation_bits_array(invalidation_bytes, invalidation_bytes_nr, ch_invalidation_pos) - - return InvalidationArray(invalidation_bits, (group_index, ch_invalidation_pos)) + return self._invalidation_cache[key] def append( self, @@ -6989,7 +6989,7 @@ def _get_structure( if master_is_required: timestamps.append(self.get_master(gp_nr, fragment, one_piece=True)) if channel_invalidation_present: - invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel, fragment)) + invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel.pos_invalidation_bit, fragment)) count += 1 else: @@ -7018,7 +7018,7 @@ def _get_structure( if master_is_required: timestamps.append(self.get_master(gp_nr, fragment, one_piece=True)) if channel_invalidation_present: - invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel, fragment)) + invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel.pos_invalidation_bit, fragment)) count += 1 @@ -7383,7 +7383,7 @@ def _get_array( if master_is_required: timestamps.append(self.get_master(gp_nr, fragment, one_piece=True)) if channel_invalidation_present: - invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel, fragment)) + invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel.pos_invalidation_bit, fragment)) channel_values.append(vals) count += 1 @@ -7516,7 +7516,7 @@ def _get_scalar( ) ) if channel_invalidation_present: - invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel, fragment)) + invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel.pos_invalidation_bit, fragment)) channel_values.append(vals) count += 1 @@ -7644,7 +7644,7 @@ def _get_scalar( timestamps = None if channel_invalidation_present: - invalidation_bits = self.get_invalidation_bits(gp_nr, channel, fragment) + invalidation_bits = 
self.get_invalidation_bits(gp_nr, channel.pos_invalidation_bit, fragment) if not ignore_invalidation_bits: vals = vals[nonzero(~invalidation_bits)[0]] @@ -7672,7 +7672,9 @@ def _get_scalar( if master_is_required: timestamps.append(self.get_master(gp_nr, fragment, one_piece=True)) if channel_invalidation_present: - invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel, fragment)) + invalidation_bits.append( + self.get_invalidation_bits(gp_nr, channel.pos_invalidation_bit, fragment) + ) channel_values.append(vals) vals = concatenate(channel_values) @@ -7700,7 +7702,9 @@ def _get_scalar( if master_is_required: timestamps.append(self.get_master(gp_nr, fragment, one_piece=True)) if channel_invalidation_present: - invalidation_bits.append(self.get_invalidation_bits(gp_nr, channel, fragment)) + invalidation_bits.append( + self.get_invalidation_bits(gp_nr, channel.pos_invalidation_bit, fragment) + ) if count > 1: buffer = bytearray().join(buffer) @@ -8334,6 +8338,8 @@ def _yield_selected_signals( if not grp.single_channel_dtype: self._prepare_record(grp) + self._invalidation_cache.clear() + if 1 and len(channels) >= 100: # prepare the invalidation bytes for this group and fragment invalidation_bytes = get_channel_raw_bytes( diff --git a/src/asammdf/blocks/utils.py b/src/asammdf/blocks/utils.py index ec3b76964..f3199f23b 100644 --- a/src/asammdf/blocks/utils.py +++ b/src/asammdf/blocks/utils.py @@ -2434,15 +2434,15 @@ def __exit__(self, type, value, traceback): def display(self): if self.count: - for factor, r, unit in ((1e3, 3, 'ms'), (1e6, 6, 'us'), (1e9, 9, 'ns')): + for factor, r, unit in ((1e3, 3, "ms"), (1e6, 6, "us"), (1e9, 9, "ns")): tpi = round(self.total_time / self.count, r) if tpi: break print( - f"""TIMER {self.name}: + f"""TIMER {self.name}: \t* {self.count} iterations in {self.total_time * 1000:.3f}ms \t* {self.count / self.total_time:.3f} iter/s \t* {self.total_time / self.count * factor:.3f} {unit}/iter""" - ) + ) else: - print(f'TIMER 
{self.name}:\n\t* inactive') + print(f"TIMER {self.name}:\n\t* inactive") From adbc444f43e21efd35c8739c3e8c04a07d7a0ea5 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Thu, 12 Dec 2024 08:09:28 +0200 Subject: [PATCH 15/60] fix --- src/asammdf/blocks/mdf_v4.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 31cccc7d3..854ad399a 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -3357,7 +3357,7 @@ def append( inval_bits, ) fields.extend(new_fields) - gp_sig_types.append((sig_type, signal.dtype.itemsize)) + gp_sig_types.append((sig_type, signal.samples.dtype.itemsize)) elif sig_type == v4c.SIGNAL_TYPE_ARRAY: # here we have channel arrays or mdf v3 channel dependencies From 94618b2e7202999c2053a77c0dcf83299bc8a68d Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Thu, 12 Dec 2024 08:17:01 +0200 Subject: [PATCH 16/60] void * for pthread target --- src/asammdf/blocks/cutils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 9c68957db..48629b784 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1563,7 +1563,7 @@ struct dtype }; -static PyObject *data_block_from_arrays_C(void *lpParam ) +void * data_block_from_arrays_C(void *lpParam ) { Py_ssize_t size; PyObject *data_blocks, *out = NULL, *item, *array, *copy_array, *itemsize, *cycles_obj; From 75141365dff2b09b50829b64d23e688436a5f8e0 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Thu, 12 Dec 2024 12:59:25 +0200 Subject: [PATCH 17/60] some performance tweaks --- src/asammdf/blocks/mdf_v4.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 854ad399a..276524043 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -1419,6 +1419,9 @@ def _load_data( if record_count is not None: 
invalidation_record_count = record_count * invalidation_size record_count *= samples_size + max_size = record_count + invalidation_record_count + else: + max_size = (invalidation_size + samples_size) * channel_group.cycles_nr if not samples_size: if rm: @@ -1453,6 +1456,9 @@ def _load_data( invalidation_split_size = invalidation_size split_size = int(split_size) + if split_size > max_size: + invalidation_split_size = (max_size // samples_size) * invalidation_size + split_size = max_size buffer = bytearray(split_size) buffer_view = memoryview(buffer) @@ -2643,7 +2649,6 @@ def get_invalidation_bits( get_invalidation_bits_array(invalidation_bytes, invalidation_bytes_nr, pos_invalidation_bit), (group_index, pos_invalidation_bit), ) - invalidation_bits = self._invalidation_cache[key] return self._invalidation_cache[key] @@ -6851,7 +6856,6 @@ def get( ) if all_invalid: - invalidation_bits = np.ones(len(vals), dtype=bool) if samples_only: @@ -6929,6 +6933,9 @@ def get( debug_channel(self, grp, channel, dependency_list) raise + if data is None: + self._invalidation_cache.clear() + return res def _get_structure( @@ -8321,7 +8328,7 @@ def _yield_selected_signals( except: break - if perf_counter() - tt > 120: + if perf_counter() - tt > 180: x = 1 / 0 # prepare the master @@ -8364,6 +8371,7 @@ def _yield_selected_signals( raw=True, ignore_invalidation_bits=True, samples_only=False, + skip_channel_validation=True, ) signals.append(signal) @@ -8377,6 +8385,7 @@ def _yield_selected_signals( raw=True, ignore_invalidation_bits=True, samples_only=True, + skip_channel_validation=True, ) signals.append((signal, invalidation_bits)) @@ -8454,6 +8463,7 @@ def _yield_selected_signals( yield signals grp.read_split_count = 0 + self._invalidation_cache.clear() def get_master( self, From fef55c9cc30ffdfacd3f1ba22c9b41d8c31d4fb5 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Thu, 12 Dec 2024 16:21:49 +0200 Subject: [PATCH 18/60] fast_path ? 
--- src/asammdf/blocks/mdf_v4.py | 106 ++++++++++++++++++++++++-------- src/asammdf/blocks/v4_blocks.py | 2 + 2 files changed, 83 insertions(+), 25 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 276524043..6b172f389 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -164,6 +164,7 @@ # 100 extra steps for the sorting, 1 step after sorting and 1 step at finish SORT_STEPS = 102 +DATA_IS_CHANNEL_BYTES = [-2, -2] logger = logging.getLogger("asammdf") @@ -2628,27 +2629,27 @@ def get_invalidation_bits( group = self.groups[group_index] data_bytes, offset, _count, invalidation_bytes = fragment - invalidation_bytes_nr = group.channel_group.invalidation_bytes_nr - + if invalidation_bytes is None: + invalidation_bytes_nr = group.channel_group.invalidation_bytes_nr + samples_byte_nr = group.channel_group.samples_byte_nr record = group.record if record is None: self._prepare_record(group) invalidation_bytes = get_channel_raw_bytes( data_bytes, - group.channel_group.samples_byte_nr + group.channel_group.invalidation_bytes_nr, - group.channel_group.samples_byte_nr, + samples_byte_nr + invalidation_bytes_nr, + samples_byte_nr, invalidation_bytes_nr, ) key = (group_index, offset, _count, pos_invalidation_bit) if key not in self._invalidation_cache: - for i in range(invalidation_bytes_nr * 8): - self._invalidation_cache[(group_index, offset, _count, i)] = InvalidationArray( - get_invalidation_bits_array(invalidation_bytes, invalidation_bytes_nr, pos_invalidation_bit), - (group_index, pos_invalidation_bit), - ) + self._invalidation_cache[key] = InvalidationArray( + get_invalidation_bits_array(invalidation_bytes, group.channel_group.invalidation_bytes_nr, pos_invalidation_bit), + (group_index, pos_invalidation_bit), + ) return self._invalidation_cache[key] @@ -6841,19 +6842,28 @@ def get( ) else: - vals, timestamps, invalidation_bits, encoding = self._get_scalar( - channel=channel, - group=grp, - 
group_index=gp_nr, - channel_index=ch_nr, - dependency_list=dependency_list, - raster=raster, - data=data, - ignore_invalidation_bits=ignore_invalidation_bits, - record_offset=record_offset, - record_count=record_count, - master_is_required=master_is_required, - ) + if ( + data + and (fast_path := channel.fast_path) is not None + and not master_is_required + and ignore_invalidation_bits + and not raster + ): + vals, timestamps, invalidation_bits, encoding = self._fast_scalar_path(*fast_path, data) + else: + vals, timestamps, invalidation_bits, encoding = self._get_scalar( + channel=channel, + group=grp, + group_index=gp_nr, + channel_index=ch_nr, + dependency_list=dependency_list, + raster=raster, + data=data, + ignore_invalidation_bits=ignore_invalidation_bits, + record_offset=record_offset, + record_count=record_count, + master_is_required=master_is_required, + ) if all_invalid: invalidation_bits = np.ones(len(vals), dtype=bool) @@ -7447,6 +7457,39 @@ def _get_array( return vals, timestamps, invalidation_bits, None + def _fast_scalar_path( + self, + gp_nr, + record_size, + byte_offset, + byte_size, + pos_invalidation_bit, + # data_type, + # channel_type, + # bit_count, + dtype, + data, + ): + data_bytes, *rec_key, invalidation_bytes = data + if rec_key != DATA_IS_CHANNEL_BYTES: + buffer = get_channel_raw_bytes( + data_bytes, + record_size, + byte_offset, + byte_size, + ) + else: + buffer = data_bytes + + vals = frombuffer(buffer, dtype=dtype) + + if pos_invalidation_bit >= 0: + invalidation_bits = self.get_invalidation_bits(gp_nr, pos_invalidation_bit, data) + else: + invalidation_bits = None + + return vals, None, invalidation_bits, None + def _get_scalar( self, channel: Channel, @@ -7462,10 +7505,8 @@ def _get_scalar( master_is_required: bool, skip_vlsd: bool = False, ) -> tuple[NDArray[Any], NDArray[Any] | None, NDArray[Any] | None, str | None]: - grp = group - gp_nr = group_index - ch_nr = channel_index + grp = group # get group data if data is None: data = 
self._load_data(grp, record_offset=record_offset, record_count=record_count) @@ -7473,6 +7514,9 @@ def _get_scalar( else: one_piece = True + gp_nr = group_index + ch_nr = channel_index + channel_invalidation_present = channel.flags & (v4c.FLAG_CN_ALL_INVALID | v4c.FLAG_CN_INVALIDATION_PRESENT) data_type = channel.data_type @@ -7638,6 +7682,18 @@ def _get_scalar( view = f"{channel_dtype.byteorder}i{vals.itemsize}" if dtype(view) != vals.dtype: vals = vals.view(view) + elif channel_type == v4c.CHANNEL_TYPE_VALUE and channel.fast_path is None: + channel.fast_path = ( + gp_nr, + record_size + channel_group.invalidation_bytes_nr, + byte_offset, + byte_size, + channel.pos_invalidation_bit if channel_invalidation_present else -1, + # data_type, + # channel_type, + # bit_count, + dtype_, + ) else: vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr) diff --git a/src/asammdf/blocks/v4_blocks.py b/src/asammdf/blocks/v4_blocks.py index 8eef85d9c..2d53ac2f9 100644 --- a/src/asammdf/blocks/v4_blocks.py +++ b/src/asammdf/blocks/v4_blocks.py @@ -476,6 +476,7 @@ class Channel: "default_X_dg_addr", "display_names", "dtype_fmt", + "fast_path", "flags", "id", "links_nr", @@ -1020,6 +1021,7 @@ def __init__(self, **kwargs) -> None: del self.display_names[self.name] self.standard_C_size = True + self.fast_path = None def __getitem__(self, item: str) -> Any: return self.__getattribute__(item) From 2ba4427cb50f7c6c722f72565a354d2ea51cef2a Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Thu, 12 Dec 2024 17:43:02 +0200 Subject: [PATCH 19/60] optimize for performance --- src/asammdf/blocks/cutils.c | 2 +- src/asammdf/blocks/options.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 48629b784..53f825ae6 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -9,7 +9,7 @@ #include #include #include -#define MAX_THREADS 4 +#define MAX_THREADS 12 #if defined(_WIN32) #include 
diff --git a/src/asammdf/blocks/options.py b/src/asammdf/blocks/options.py index 8db147dc0..d76e317bd 100644 --- a/src/asammdf/blocks/options.py +++ b/src/asammdf/blocks/options.py @@ -14,7 +14,7 @@ class FloatInterpolation(IntEnum): _GLOBAL_OPTIONS = { - "read_fragment_size": 64 * 1024 * 1024, + "read_fragment_size": 256 * 1024 * 1024, "write_fragment_size": 4 * 1024 * 1024, "use_display_names": True, "single_bit_uint_as_bool": False, From 99b337588576062021bd7f7329b67488a0974f3f Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 13 Dec 2024 10:41:13 +0200 Subject: [PATCH 20/60] control the thread count for get_channel_raw_bytes_parallel and data_block_from_arrays --- src/asammdf/blocks/cutils.c | 193 +++++++++++++++++++++++------------ src/asammdf/blocks/mdf_v4.py | 55 ++++------ src/asammdf/mdf.py | 4 +- 3 files changed, 150 insertions(+), 102 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 53f825ae6..093eb27d1 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1370,11 +1370,10 @@ static PyObject *get_invalidation_bits_array(PyObject *self, PyObject *args) typedef struct MyData { uint8_t * inptr; - Py_ssize_t size; + uint8_t * outptr; Py_ssize_t record_size; Py_ssize_t byte_offset; - Py_ssize_t byte_count; - uint8_t * outptr; + Py_ssize_t byte_count; Py_ssize_t cycles; } MYDATA, *PMYDATA; @@ -1382,12 +1381,13 @@ typedef struct ChannelInfo { PMYDATA data; Py_ssize_t count; Py_ssize_t idx; + Py_ssize_t thread_count; } MyChannelInfo, *PMyChannelInfo; void * get_channel_raw_bytes_C(void *lpParam ) { - Py_ssize_t count, actual_byte_count, delta; + Py_ssize_t count, actual_byte_count, delta, thread_count; PMYDATA data; PMyChannelInfo indata; indata = (PMyChannelInfo) lpParam; @@ -1395,12 +1395,13 @@ void * get_channel_raw_bytes_C(void *lpParam ) Py_ssize_t signal_count, thread_idx; signal_count = indata->count; thread_idx = indata->idx; + thread_count = indata->thread_count; data = indata->data; 
for (Py_ssize_t i = 0; irecord_size < data->byte_offset + data->byte_count) { inptr = data->inptr; @@ -1441,7 +1442,7 @@ void * get_channel_raw_bytes_C(void *lpParam ) } } - for (Py_ssize_t i = 0; icount; thread_idx = indata->idx; data = indata->data; + thread_count= indata->thread_count; for (Py_ssize_t i = 0; irecord_size; step = record_size - data->byte_count; cycles = data->cycles; @@ -1602,39 +1638,49 @@ void * data_block_from_arrays_C(void *lpParam ) outptr += step; } - for (Py_ssize_t i = 0; i 180: + if perf_counter() - tt > 120: x = 1 / 0 # prepare the master @@ -8416,6 +8419,7 @@ def _yield_selected_signals( fragment[0], grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, group_info[group_index], + THREAD_COUNT, ) if idx == 0: @@ -8525,7 +8529,6 @@ def get_master( self, index: int, data: bytes | None = None, - raster: RasterType | None = None, record_offset: int = 0, record_count: int | None = None, one_piece: bool = False, @@ -8538,10 +8541,6 @@ def get_master( group index data : (bytes, int, int, bytes|None) (data block raw bytes, fragment offset, count, invalidation bytes); default None - raster : float - raster to be used for interpolation; default None - - .. 
deprecated:: 5.13.0 record_offset : int if *data=None* use this to select the record offset from which the @@ -8557,10 +8556,6 @@ def get_master( """ - if raster is not None: - raise PendingDeprecationWarning( - "the argument raster is deprecated since version 5.13.0 " "and will be removed in a future release" - ) if self._master is not None: return self._master @@ -8720,17 +8715,7 @@ def get_master( if t.dtype != float64: t = t.astype(float64) - if raster and t.size: - timestamps = t - if len(t) > 1: - num = float(float32((timestamps[-1] - timestamps[0]) / raster)) - if int(num) == num: - timestamps = linspace(t[0], t[-1], int(num)) - else: - timestamps = arange(t[0], t[-1], raster) - else: - timestamps = t - return timestamps + return t def get_bus_signal( self, @@ -10794,7 +10779,7 @@ def _process_can_logging(self, group_index: int, grp: Group) -> None: for fragment in data: self._set_temporary_master(None) - self._set_temporary_master(self.get_master(group_index, data=fragment)) + self._set_temporary_master(self.get_master(group_index, data=fragment, one_piece=True)) bus_ids = self.get( "CAN_DataFrame.BusChannel", @@ -10834,7 +10819,7 @@ def _process_can_logging(self, group_index: int, grp: Group) -> None: for fragment in data: self._set_temporary_master(None) - self._set_temporary_master(self.get_master(group_index, data=fragment)) + self._set_temporary_master(self.get_master(group_index, data=fragment, one_piece=True)) bus_ids = self.get( "CAN_DataFrame.BusChannel", @@ -10890,7 +10875,7 @@ def _process_can_logging(self, group_index: int, grp: Group) -> None: for fragment in data: self._set_temporary_master(None) - self._set_temporary_master(self.get_master(group_index, data=fragment)) + self._set_temporary_master(self.get_master(group_index, data=fragment, one_piece=True)) data_bytes = self.get( "CAN_DataFrame.DataBytes", @@ -11050,7 +11035,7 @@ def _process_lin_logging(self, group_index: int, grp: Group) -> None: for fragment in data: 
self._set_temporary_master(None) - self._set_temporary_master(self.get_master(group_index, data=fragment)) + self._set_temporary_master(self.get_master(group_index, data=fragment, one_piece=True)) msg_ids = ( self.get( @@ -11083,7 +11068,7 @@ def _process_lin_logging(self, group_index: int, grp: Group) -> None: for fragment in data: self._set_temporary_master(None) - self._set_temporary_master(self.get_master(group_index, data=fragment)) + self._set_temporary_master(self.get_master(group_index, data=fragment, one_piece=True)) msg_ids = self.get("LIN_Frame.ID", group=group_index, data=fragment).astype(" Date: Fri, 13 Dec 2024 10:43:31 +0200 Subject: [PATCH 21/60] remove the 2min limit --- src/asammdf/blocks/mdf_v4.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 3bd2262c4..f0164c4bc 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -8387,8 +8387,8 @@ def _yield_selected_signals( except: break - if perf_counter() - tt > 120: - x = 1 / 0 + # if perf_counter() - tt > 120: + # x = 1 / 0 # prepare the master _master = self.get_master(index, data=fragments[master_index], one_piece=True) From f6f971a1709ec3030c535cb672a012314fa4a2c0 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 13 Dec 2024 14:04:50 +0200 Subject: [PATCH 22/60] getattr and setattr --- src/asammdf/blocks/mdf_v4.py | 16 +++---- src/asammdf/blocks/v4_blocks.py | 74 ++++++++++++++++----------------- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index f0164c4bc..264c9446e 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -1959,14 +1959,14 @@ def _get_data_blocks_info( while address: dl = DataList(address=address, stream=stream, mapped=mapped) for i in range(dl.data_block_nr): - addr = dl[f"data_block_addr{i}"] + addr = getattr(dl, f"data_block_addr{i}") id_string, block_len = 
COMMON_SHORT_uf(stream, addr) # can be a DataBlock if id_string == block_type: size = block_len - 24 if size: - addr = addr + COMMON_SIZE + addr += COMMON_SIZE # split the DTBLOCK into chucks of up to 32MB while True: @@ -2002,7 +2002,7 @@ def _get_data_blocks_info( break # or a DataZippedBlock - elif id_string == b"##DZ": + else: ( zip_type, param, @@ -2038,7 +2038,7 @@ def _get_data_blocks_info( ld = ListData(address=address, stream=stream, mapped=mapped) has_invalidation = ld.flags & v4c.FLAG_LD_INVALIDATION_PRESENT for i in range(ld.data_block_nr): - addr = ld[f"data_block_addr_{i}"] + addr = getattr(ld, f"data_block_addr_{i}") id_string, block_len = COMMON_SHORT_uf(stream, addr) # can be a DataBlock @@ -2243,7 +2243,7 @@ def _get_data_blocks_info( while address: dl = DataList(address=address, stream=stream) for i in range(dl.data_block_nr): - addr = dl[f"data_block_addr{i}"] + addr = getattr(dl, f"data_block_addr{i}") stream.seek(addr) id_string, block_len = COMMON_SHORT_u(stream.read(COMMON_SHORT_SIZE)) @@ -2326,7 +2326,7 @@ def _get_data_blocks_info( ld = ListData(address=address, stream=stream) has_invalidation = ld.flags & v4c.FLAG_LD_INVALIDATION_PRESENT for i in range(ld.data_block_nr): - addr = ld[f"data_block_addr{i}"] + addr = getattr(ld, f"data_block_addr{i}") stream.seek(addr) id_string, block_len = COMMON_SHORT_u(stream.read(COMMON_SHORT_SIZE)) @@ -8387,8 +8387,8 @@ def _yield_selected_signals( except: break - # if perf_counter() - tt > 120: - # x = 1 / 0 + if perf_counter() - tt > 120: + x = 1 / 0 # prepare the master _master = self.get_master(index, data=fragments[master_index], one_piece=True) diff --git a/src/asammdf/blocks/v4_blocks.py b/src/asammdf/blocks/v4_blocks.py index 2d53ac2f9..916798ece 100644 --- a/src/asammdf/blocks/v4_blocks.py +++ b/src/asammdf/blocks/v4_blocks.py @@ -349,10 +349,10 @@ def to_blocks(self, address: int, blocks: list[Any], defined_texts: dict[str, in return address def __getitem__(self, item: str) -> Any: - return 
self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __bytes__(self) -> bytes: fmt = f"{v4c.FMT_AT_COMMON}{self.embedded_size}s" @@ -1024,10 +1024,10 @@ def __init__(self, **kwargs) -> None: self.fast_path = None def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def to_blocks( self, @@ -1669,10 +1669,10 @@ def __init__(self, **kwargs) -> None: self[f"dim_size_{i}"] = kwargs[f"dim_size_{i}"] def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __str__(self) -> str: return f"" @@ -2013,10 +2013,10 @@ def __init__(self, **kwargs) -> None: self.links_nr = 6 def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def to_blocks( self, @@ -4105,10 +4105,10 @@ def metadata(self, indent: str = "") -> str: return "\n".join(metadata) def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __contains__(self, item: str) -> bool: return hasattr(self, item) @@ -4422,10 +4422,10 @@ def __init__(self, **kwargs) -> None: self.data = kwargs["data"] def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __bytes__(self) -> bytes: 
return v4c.COMMON_p(self.id, self.reserved0, self.block_len, self.links_nr) + self.data @@ -4597,10 +4597,10 @@ def __getattribute__(self, item: str) -> Any: return value def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __str__(self) -> str: return f"""""" @@ -4769,10 +4769,10 @@ def to_blocks(self, address: int, blocks: list[Any], defined_texts: dict[str, in return address def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __bytes__(self) -> bytes: result = v4c.DATA_GROUP_p( @@ -4861,11 +4861,11 @@ def __init__(self, **kwargs) -> None: self.next_dl_addr = links[0] for i, addr in enumerate(links[1:]): - self[f"data_block_addr{i}"] = addr + setattr(self, f"data_block_addr{i}", addr) stream.seek(address + self.links_nr * 8) - self.flags = stream.read(1)[0] + self.flags = stream.read_byte() if self.flags & v4c.FLAG_DL_EQUAL_LENGHT: (self.reserved1, self.data_block_nr, self.data_block_len) = unpack("<3sIQ", stream.read(15)) else: @@ -4875,7 +4875,7 @@ def __init__(self, **kwargs) -> None: stream.read((self.links_nr - 1) * 8), ) for i, offset in enumerate(offsets): - self[f"offset_{i}"] = offset + setattr(self, f"offset_{i}", offset) else: stream.seek(address) @@ -4928,10 +4928,10 @@ def __init__(self, **kwargs) -> None: self[f"offset_{i}"] = kwargs[f"offset_{i}"] def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __bytes__(self) -> bytes: keys = ("id", "reserved0", "block_len", "links_nr", "next_dl_addr") @@ -5184,10 +5184,10 @@ def __bytes__(self) -> 
bytes: return result def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __str__(self) -> str: return f"EventBlock (name: {self.name}, comment: {self.comment}, address: {hex(self.address)}, scopes: {self.scopes}, fields: {super().__str__()})" @@ -5305,10 +5305,10 @@ def __init__(self, **kwargs) -> None: self.unfinalized_custom_flags = 0 def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __bytes__(self) -> bytes: result = pack( @@ -5432,10 +5432,10 @@ def to_blocks(self, address: int, blocks: list[Any], defined_texts: dict[str, in return address def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __bytes__(self) -> bytes: result = pack(v4c.FMT_FILE_HISTORY, *[self[key] for key in v4c.KEYS_FILE_HISTORY]) @@ -5729,10 +5729,10 @@ def subject(self, value): self._common_properties["subject"] = value def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) @property def start_time(self) -> datetime: @@ -5916,10 +5916,10 @@ def __init__(self, **kwargs) -> None: self.reserved1 = b"\x00" * 5 def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __bytes__(self) -> bytes: result = pack(v4c.FMT_HL_BLOCK, *[self[key] for key in 
v4c.KEYS_HL_BLOCK]) @@ -6130,10 +6130,10 @@ def __init__(self, **kwargs) -> None: self.block_len = 24 + self.links_nr * 8 + 16 def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __bytes__(self) -> bytes: fmt = "<4sI3Q" @@ -6298,10 +6298,10 @@ def __init__(self, **kwargs) -> None: self.reserved1 = b"\x00" * 5 def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __contains__(self, item: str) -> bool: return hasattr(self, item) @@ -6568,10 +6568,10 @@ def __init__(self, **kwargs) -> None: self.block_len = size + 32 - size % 8 def __getitem__(self, item: str) -> Any: - return self.__getattribute__(item) + return getattr(self, item) def __setitem__(self, item: str, value: Any) -> None: - self.__setattr__(item, value) + setattr(self, item, value) def __bytes__(self) -> bytes: return pack( From e6052eb0a25b22609a7bdea50022f9b2f8c26e11 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 12:55:47 +0200 Subject: [PATCH 23/60] compile libdeflate --- .gitignore | 2 +- .gitmodules | 3 + CMakeLists.txt | 21 +- ext/libdeflate | 1 + src/asammdf/blocks/cutils.c | 3585 ++++++++++++++++++++--------------- 5 files changed, 2089 insertions(+), 1523 deletions(-) create mode 100644 .gitmodules create mode 160000 ext/libdeflate diff --git a/.gitignore b/.gitignore index 598f821d1..034e9720e 100644 --- a/.gitignore +++ b/.gitignore @@ -20,7 +20,7 @@ dist/ downloads/ eggs/ .eggs/ -lib/ +/lib/ lib64/ parts/ sdist/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..aacea2395 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "ext/libdeflate"] + path = ext/libdeflate + url = 
https://github.com/ebiggers/libdeflate diff --git a/CMakeLists.txt b/CMakeLists.txt index d6fa4bd85..fc74cb7d3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,24 +1,21 @@ -cmake_minimum_required(VERSION 3.15...3.26) - -project( - ${SKBUILD_PROJECT_NAME} - LANGUAGES C - VERSION ${SKBUILD_PROJECT_VERSION}) +cmake_minimum_required(VERSION 3.26...3.29) +project(${SKBUILD_PROJECT_NAME} LANGUAGES C VERSION ${SKBUILD_PROJECT_VERSION}) find_package( Python + REQUIRED COMPONENTS Interpreter Development.Module ${SKBUILD_SABI_COMPONENT} NumPy - REQUIRED) + ) + + # Add submodule libdeflate +add_subdirectory(ext/libdeflate EXCLUDE_FROM_ALL) -python_add_library(cutils - MODULE - src/asammdf/blocks/cutils.c - WITH_SOABI USE_SABI 3.9) +python_add_library(cutils MODULE WITH_SOABI USE_SABI 3.9 src/asammdf/blocks/cutils.c) -target_link_libraries(cutils PRIVATE Python::NumPy) +target_link_libraries(cutils PRIVATE Python::NumPy libdeflate::libdeflate_static) install(TARGETS cutils DESTINATION "asammdf/blocks") diff --git a/ext/libdeflate b/ext/libdeflate new file mode 160000 index 000000000..78051988f --- /dev/null +++ b/ext/libdeflate @@ -0,0 +1 @@ +Subproject commit 78051988f96dc8d8916310d8b24021f01bd9e102 diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 093eb27d1..ab5cc7b3b 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1,21 +1,24 @@ #define NPY_NO_DEPRECATED_API NPY_1_22_API_VERSION -//#define Py_LIMITED_API 0x030900f0 - #define PY_SSIZE_T_CLEAN 1 #include +#define _FILE_OFFSET_BITS 64 #include "numpy/arrayobject.h" #include "numpy/ndarrayobject.h" #include #include #include #include -#define MAX_THREADS 12 +#include +#include "data_block_utilities.c" -#if defined(_WIN32) - #include - #include -#else - #include +#if defined(_WIN32) +#include +#include +#else +#include +#include +#define Sleep(x) usleep((int)(1000 * (x))) +#include #endif #define MAX(a,b) ((a) > (b) ? 
(a) : (b)) @@ -29,1894 +32,2454 @@ char err_string[1024]; struct rec_info { - uint32_t id; - uint32_t size; - PyObject *mlist; + uint32_t id; + uint32_t size; + PyObject *mlist; }; struct node { - struct node *next; - struct rec_info info; + struct node *next; + struct rec_info info; }; + static PyObject *sort_data_block(PyObject *self, PyObject *args) { - uint64_t id_size = 0, position = 0, size; - uint32_t rec_size, length, rec_id; - PyObject *signal_data, *partial_records, *record_size, *optional, *mlist; - PyObject *bts, *key, *value, *rem = NULL; - unsigned char *buf, *end, *orig; - struct node *head = NULL, *last = NULL, *item; - - if (!PyArg_ParseTuple(args, "OOOK|O", &signal_data, &partial_records, &record_size, &id_size, &optional)) + uint64_t id_size = 0, position = 0, size; + uint32_t rec_size, length, rec_id; + PyObject *signal_data, *partial_records, *record_size, *optional, *mlist; + PyObject *bts, *key, *value, *rem = NULL; + unsigned char *buf, *end, *orig; + struct node *head = NULL, *last = NULL, *item; + + if (!PyArg_ParseTuple(args, "OOOK|O", &signal_data, &partial_records, &record_size, &id_size, &optional)) + { + return 0; + } + else + { + Py_ssize_t pos = 0; + position = 0; + + while (PyDict_Next(record_size, &pos, &key, &value)) { - return 0; + item = malloc(sizeof(struct node)); + item->info.id = PyLong_AsUnsignedLong(key); + item->info.size = PyLong_AsUnsignedLong(value); + item->info.mlist = PyDict_GetItem(partial_records, key); + item->next = NULL; + if (last) + last->next = item; + if (!head) + head = item; + last = item; } - else + + buf = (unsigned char *)PyBytes_AsString(signal_data); + orig = buf; + size = (uint64_t)PyBytes_Size(signal_data); + end = buf + size; + + while ((buf + id_size) < end) { - Py_ssize_t pos = 0; - position = 0; - while (PyDict_Next(record_size, &pos, &key, &value)) - { - item = malloc(sizeof(struct node)); - item->info.id = PyLong_AsUnsignedLong(key); - item->info.size = PyLong_AsUnsignedLong(value); - 
item->info.mlist = PyDict_GetItem(partial_records, key); - item->next = NULL; - if (last) - last->next = item; - if (!head) - head = item; - last = item; - } + rec_id = 0; + for (unsigned char i = 0; i < id_size; i++, buf++) + { + rec_id += (*buf) << (i << 3); + } - buf = (unsigned char *)PyBytes_AsString(signal_data); - orig = buf; - size = (uint64_t)PyBytes_Size(signal_data); - end = buf + size; + key = PyLong_FromUnsignedLong(rec_id); + value = PyDict_GetItem(record_size, key); - while ((buf + id_size) < end) - { + if (!value) + { + rem = PyBytes_FromStringAndSize(NULL, 0); + Py_XDECREF(key); + return rem; + } + else + { + rec_size = PyLong_AsUnsignedLong(value); + } + + mlist = PyDict_GetItem(partial_records, key); + + if (!mlist) + { + rem = PyBytes_FromStringAndSize(NULL, 0); + Py_XDECREF(key); + return rem; + } - rec_id = 0; - for (unsigned char i = 0; i < id_size; i++, buf++) - { - rec_id += (*buf) << (i << 3); - } - - key = PyLong_FromUnsignedLong(rec_id); - value = PyDict_GetItem(record_size, key); - - if (!value) - { - rem = PyBytes_FromStringAndSize(NULL, 0); - Py_XDECREF(key); - return rem; - } - else - { - rec_size = PyLong_AsUnsignedLong(value); - } - - mlist = PyDict_GetItem(partial_records, key); - - if (!mlist) - { - rem = PyBytes_FromStringAndSize(NULL, 0); - Py_XDECREF(key); - return rem; - } - - Py_XDECREF(key); - - if (rec_size) - { - if (rec_size + position + id_size > size) - { - break; - } - bts = PyBytes_FromStringAndSize((const char *)buf, (Py_ssize_t)rec_size); - PyList_Append( - mlist, - bts); - Py_XDECREF(bts); - - buf += rec_size; - } - else - { - if (4 + position + id_size > size) - { - break; - } - rec_size = (buf[3] << 24) + (buf[2] << 16) + (buf[1] << 8) + buf[0]; - length = rec_size + 4; - if (position + length + id_size > size) - { - break; - } - bts = PyBytes_FromStringAndSize((const char *)buf, (Py_ssize_t)length); - PyList_Append(mlist, bts); - Py_XDECREF(bts); - buf += length; - } - - position = (uint64_t)(buf - orig); - } + 
Py_XDECREF(key); - while (head != NULL) + if (rec_size) + { + if (rec_size + position + id_size > size) { - item = head; - item->info.mlist = NULL; - - head = head->next; - item->next = NULL; - free(item); + break; + } + bts = PyBytes_FromStringAndSize((const char *)buf, (Py_ssize_t)rec_size); + PyList_Append( + mlist, + bts); + Py_XDECREF(bts); + + buf += rec_size; + } + else + { + if (4 + position + id_size > size) + { + break; } + rec_size = (buf[3] << 24) + (buf[2] << 16) + (buf[1] << 8) + buf[0]; + length = rec_size + 4; + if (position + length + id_size > size) + { + break; + } + bts = PyBytes_FromStringAndSize((const char *)buf, (Py_ssize_t)length); + PyList_Append(mlist, bts); + Py_XDECREF(bts); + buf += length; + } - head = NULL; - last = NULL; - item = NULL; - mlist = NULL; - - rem = PyBytes_FromStringAndSize((const char *)(orig + position), (Py_ssize_t)(size - position)); + position = (uint64_t)(buf - orig); + } - buf = NULL; - orig = NULL; - end = NULL; + while (head != NULL) + { + item = head; + item->info.mlist = NULL; - return rem; + head = head->next; + item->next = NULL; + free(item); } + + head = NULL; + last = NULL; + item = NULL; + mlist = NULL; + + rem = PyBytes_FromStringAndSize((const char *)(orig + position), (Py_ssize_t)(size - position)); + + buf = NULL; + orig = NULL; + end = NULL; + + return rem; + } } static Py_ssize_t calc_size(char *buf) { - return (unsigned char)buf[3] << 24 | - (unsigned char)buf[2] << 16 | - (unsigned char)buf[1] << 8 | - (unsigned char)buf[0]; + return (unsigned char)buf[3] << 24 | + (unsigned char)buf[2] << 16 | + (unsigned char)buf[1] << 8 | + (unsigned char)buf[0]; } static PyObject *extract(PyObject *self, PyObject *args) { - Py_ssize_t i = 0, count, max = 0, list_count; - int64_t offset; - Py_ssize_t pos = 0, size = 0; - PyObject *signal_data, *is_byte_array, *offsets, *offsets_list = NULL; - char *buf; - PyArrayObject *vals; - PyArray_Descr *descr; - unsigned char *addr2; - - if (!PyArg_ParseTuple(args, 
"OOO", &signal_data, &is_byte_array, &offsets)) + Py_ssize_t i = 0, count, max = 0, list_count; + int64_t offset; + Py_ssize_t pos = 0, size = 0; + PyObject *signal_data, *is_byte_array, *offsets, *offsets_list = NULL; + char *buf; + PyArrayObject *vals; + PyArray_Descr *descr; + unsigned char *addr2; + + if (!PyArg_ParseTuple(args, "OOO", &signal_data, &is_byte_array, &offsets)) + { + return 0; + } + else + { + Py_ssize_t max_size = 0; + Py_ssize_t retval = PyBytes_AsStringAndSize(signal_data, &buf, &max_size); + + if (retval == -1) + { + printf("PyBytes_AsStringAndSize error\n"); + return NULL; + } + + count = 0; + pos = 0; + + if (offsets == Py_None) { - return 0; + while ((pos + 4) <= max_size) + { + size = calc_size(&buf[pos]); + + if ((pos + 4 + size) > max_size) + break; + + if (max < size) + max = size; + pos += 4 + size; + count++; + } } else { - Py_ssize_t max_size = 0; - Py_ssize_t retval = PyBytes_AsStringAndSize(signal_data, &buf, &max_size); + offsets_list = PyObject_CallMethod(offsets, "tolist", NULL); + list_count = (Py_ssize_t)PyList_Size(offsets_list); + for (i = 0; i < list_count; i++) + { + offset = (int64_t)PyLong_AsLongLong(PyList_GetItem(offsets_list, i)); + if ((offset + 4) > max_size) + break; + size = calc_size(&buf[offset]); + if ((offset + 4 + size) > max_size) + break; + if (max < size) + max = size; + count++; + } + } - if (retval == -1) - { - printf("PyBytes_AsStringAndSize error\n"); - return NULL; - } + if (PyObject_IsTrue(is_byte_array)) + { - count = 0; - pos = 0; + npy_intp dims[2]; + dims[0] = count; + dims[1] = max; - if (offsets == Py_None) - { - while ((pos + 4) <= max_size) - { - size = calc_size(&buf[pos]); - - if ((pos + 4 + size) > max_size) - break; - - if (max < size) - max = size; - pos += 4 + size; - count++; - } - } - else - { - offsets_list = PyObject_CallMethod(offsets, "tolist", NULL); - list_count = (Py_ssize_t)PyList_Size(offsets_list); - for (i = 0; i < list_count; i++) - { - offset = 
(int64_t)PyLong_AsLongLong(PyList_GetItem(offsets_list, i)); - if ((offset + 4) > max_size) - break; - size = calc_size(&buf[offset]); - if ((offset + 4 + size) > max_size) - break; - if (max < size) - max = size; - count++; - } - } + vals = (PyArrayObject *)PyArray_ZEROS(2, dims, NPY_UBYTE, 0); - if (PyObject_IsTrue(is_byte_array)) + if (offsets == Py_None) + { + pos = 0; + for (i = 0; i < count; i++) { - - npy_intp dims[2]; - dims[0] = count; - dims[1] = max; - - vals = (PyArrayObject *)PyArray_ZEROS(2, dims, NPY_UBYTE, 0); - - if (offsets == Py_None) - { - pos = 0; - for (i = 0; i < count; i++) - { - addr2 = (unsigned char *)PyArray_GETPTR2(vals, i, 0); - size = calc_size(&buf[pos]); - pos += 4; - memcpy(addr2, &buf[pos], size); - pos += size; - } - } - else - { - for (i = 0; i < count; i++) - { - addr2 = (unsigned char *)PyArray_GETPTR2(vals, i, 0); - offset = (int64_t)PyLong_AsLongLong(PyList_GetItem(offsets_list, i)); - size = calc_size(&buf[offset]); - memcpy(addr2, &buf[offset + 4], size); - } - } - } - else + addr2 = (unsigned char *)PyArray_GETPTR2(vals, i, 0); + size = calc_size(&buf[pos]); + pos += 4; + memcpy(addr2, &buf[pos], size); + pos += size; + } + } + else + { + for (i = 0; i < count; i++) { - npy_intp dims[1]; - dims[0] = count; + addr2 = (unsigned char *)PyArray_GETPTR2(vals, i, 0); + offset = (int64_t)PyLong_AsLongLong(PyList_GetItem(offsets_list, i)); + size = calc_size(&buf[offset]); + memcpy(addr2, &buf[offset + 4], size); + } + } + } + else + { + npy_intp dims[1]; + dims[0] = count; - descr = PyArray_DescrFromType(NPY_STRING); - descr = PyArray_DescrNew(descr); + descr = PyArray_DescrFromType(NPY_STRING); + descr = PyArray_DescrNew(descr); #if NPY_ABI_VERSION < 0x02000000 - descr->elsize = (int)max; + descr->elsize = (int)max; #else - PyDataType_SET_ELSIZE(descr, max); + PyDataType_SET_ELSIZE(descr, max); #endif - vals = (PyArrayObject *)PyArray_Zeros(1, dims, descr, 0); - - if (offsets == Py_None) - { - - pos = 0; - for (i = 0; i < 
count; i++) - { - addr2 = (unsigned char *)PyArray_GETPTR1(vals, i); - size = calc_size(&buf[pos]); - pos += 4; - memcpy(addr2, &buf[pos], size); - pos += size; - } - } - else - { - for (i = 0; i < count; i++) - { - addr2 = (unsigned char *)PyArray_GETPTR1(vals, i); - offset = (int64_t)PyLong_AsLongLong(PyList_GetItem(offsets_list, i)); - size = calc_size(&buf[offset]); - memcpy(addr2, &buf[offset + 4], size); - } - Py_XDECREF(offsets_list); - } + vals = (PyArrayObject *)PyArray_Zeros(1, dims, descr, 0); + + if (offsets == Py_None) + { + + pos = 0; + for (i = 0; i < count; i++) + { + addr2 = (unsigned char *)PyArray_GETPTR1(vals, i); + size = calc_size(&buf[pos]); + pos += 4; + memcpy(addr2, &buf[pos], size); + pos += size; + } + } + else + { + for (i = 0; i < count; i++) + { + addr2 = (unsigned char *)PyArray_GETPTR1(vals, i); + offset = (int64_t)PyLong_AsLongLong(PyList_GetItem(offsets_list, i)); + size = calc_size(&buf[offset]); + memcpy(addr2, &buf[offset + 4], size); } + Py_XDECREF(offsets_list); + } } + } - return (PyObject *)vals; + return (PyObject *)vals; } static PyObject *lengths(PyObject *self, PyObject *args) { - Py_ssize_t i = 0; - Py_ssize_t count; - PyObject *lst, *values, *item; + Py_ssize_t i = 0; + Py_ssize_t count; + PyObject *lst, *values, *item; - if (!PyArg_ParseTuple(args, "O", &lst)) - { - return 0; - } - else - { + if (!PyArg_ParseTuple(args, "O", &lst)) + { + return 0; + } + else + { - count = PyList_Size(lst); + count = PyList_Size(lst); - values = PyTuple_New(count); + values = PyTuple_New(count); - for (i = 0; i < (Py_ssize_t)count; i++) - { - item = PyList_GetItem(lst, i); - PyTuple_SetItem(values, i, PyLong_FromSsize_t(PyBytes_Size(item))); - } + for (i = 0; i < (Py_ssize_t)count; i++) + { + item = PyList_GetItem(lst, i); + PyTuple_SetItem(values, i, PyLong_FromSsize_t(PyBytes_Size(item))); } + } - return values; + return values; } static PyObject *get_vlsd_offsets(PyObject *self, PyObject *args) { - Py_ssize_t i = 0; - Py_ssize_t 
count; - PyObject *lst, *item, *result; - npy_intp dim[1]; - PyArrayObject *values; + Py_ssize_t i = 0; + Py_ssize_t count; + PyObject *lst, *item, *result; + npy_intp dim[1]; + PyArrayObject *values; - uint64_t current_size = 0; + uint64_t current_size = 0; - void *h_result; + void *h_result; - if (!PyArg_ParseTuple(args, "O", &lst)) - { - return 0; - } - else - { + if (!PyArg_ParseTuple(args, "O", &lst)) + { + return 0; + } + else + { - count = PyList_Size(lst); - dim[0] = (Py_ssize_t)count; - values = (PyArrayObject *)PyArray_SimpleNew(1, dim, NPY_ULONGLONG); + count = PyList_Size(lst); + dim[0] = (Py_ssize_t)count; + values = (PyArrayObject *)PyArray_SimpleNew(1, dim, NPY_ULONGLONG); - for (i = 0; i < (Py_ssize_t)count; i++) - { - h_result = PyArray_GETPTR1(values, i); - item = PyList_GetItem(lst, i); - *((uint64_t *)h_result) = current_size; - current_size += (uint64_t)PyBytes_Size(item); - } + for (i = 0; i < (Py_ssize_t)count; i++) + { + h_result = PyArray_GETPTR1(values, i); + item = PyList_GetItem(lst, i); + *((uint64_t *)h_result) = current_size; + current_size += (uint64_t)PyBytes_Size(item); } + } - result = PyTuple_Pack(2, values, PyLong_FromUnsignedLongLong(current_size)); + result = PyTuple_Pack(2, values, PyLong_FromUnsignedLongLong(current_size)); - return result; + return result; } static PyObject *get_vlsd_max_sample_size(PyObject *self, PyObject *args) { - Py_ssize_t i = 0; - Py_ssize_t count = 0; - PyObject *data, *offsets; - uint64_t max_size = 0; - uint32_t vlsd_size = 0; - char *inptr = NULL, *data_end = NULL, *current_position = NULL; + Py_ssize_t i = 0; + Py_ssize_t count = 0; + PyObject *data, *offsets; + uint64_t max_size = 0; + uint32_t vlsd_size = 0; + char *inptr = NULL, *data_end = NULL, *current_position = NULL; - uint64_t current_size = 0, *offsets_array; + uint64_t current_size = 0, *offsets_array; - if (!PyArg_ParseTuple(args, "OOn", &data, &offsets, &count)) - { - return 0; - } - else + if (!PyArg_ParseTuple(args, "OOn", &data, 
&offsets, &count)) + { + return 0; + } + else + { + offsets_array = (uint64_t *)PyArray_GETPTR1((PyArrayObject *)offsets, 0); + inptr = PyBytes_AsString(data); + data_end = inptr + PyBytes_Size(data); + + for (i = 0; i < count; i++, offsets_array++) { - offsets_array = (uint64_t *)PyArray_GETPTR1((PyArrayObject *)offsets, 0); - inptr = PyBytes_AsString(data); - data_end = inptr + PyBytes_Size(data); - - for (i = 0; i < count; i++, offsets_array++) - { - current_position = inptr + *offsets_array; - if (current_position >= data_end) - { - return PyLong_FromUnsignedLongLong(max_size); - } - memcpy(&vlsd_size, inptr + *offsets_array, 4); - if (vlsd_size > max_size) - { - max_size = vlsd_size; - } - } + current_position = inptr + *offsets_array; + if (current_position >= data_end) + { + return PyLong_FromUnsignedLongLong(max_size); + } + memcpy(&vlsd_size, inptr + *offsets_array, 4); + if (vlsd_size > max_size) + { + max_size = vlsd_size; + } } + } - return PyLong_FromUnsignedLongLong(max_size); + return PyLong_FromUnsignedLongLong(max_size); } void positions_char(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, PyObject *result, int32_t step, int32_t count, int32_t last) { - char min, max, *indata; - int32_t *outdata; - Py_ssize_t pos_min = 0, pos_max = 0; - - indata = (char *)PyArray_GETPTR1((PyArrayObject *)samples, 0); - outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + char min, max, *indata; + int32_t *outdata; + Py_ssize_t pos_min = 0, pos_max = 0; - char *ps; - double tmin, tmax, *ts, *pt; + indata = (char *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); - ps = (char *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); + char *ps; + double tmin, tmax, *ts, *pt; - Py_ssize_t current_pos = 0, stop_index 
= count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { + ps = (char *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - pos_min = current_pos; - pos_max = current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; + pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - } - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - *outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } + + current_pos++; + + if ((i == stop_index) && (j == last)) + break; + } + } + + if (pos_min < pos_max) + { + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; + } + else + { + *outdata++ = pos_max; + *outdata++ = pos_min; + + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + *pt++ = tmin; } + } } void positions_short(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, 
PyObject *result, int32_t step, int32_t count, int32_t last) { - short min, max, *indata; - int32_t *outdata; - Py_ssize_t pos_min = 0, pos_max = 0; + short min, max, *indata; + int32_t *outdata; + Py_ssize_t pos_min = 0, pos_max = 0; - indata = (short *)PyArray_GETPTR1((PyArrayObject *)samples, 0); - outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + indata = (short *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); - short *ps; - double tmin, tmax, *ts, *pt; + short *ps; + double tmin, tmax, *ts, *pt; - ps = (short *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); + ps = (short *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - Py_ssize_t current_pos = 0, stop_index = count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { - - pos_min = current_pos; - pos_max = current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; + pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - } - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - 
*outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } + + current_pos++; + + if ((i == stop_index) && (j == last)) + break; + } + } + + if (pos_min < pos_max) + { + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; + } + else + { + *outdata++ = pos_max; + *outdata++ = pos_min; + + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + *pt++ = tmin; } + } } void positions_long(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, PyObject *result, int32_t step, int32_t count, int32_t last) { - int32_t min, max, *indata; - int32_t *outdata; - Py_ssize_t pos_min = 0, pos_max = 0; - - indata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)samples, 0); - outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + int32_t min, max, *indata; + int32_t *outdata; + Py_ssize_t pos_min = 0, pos_max = 0; - long *ps; - double tmin, tmax, *ts, *pt; + indata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); - ps = (int32_t *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); + long *ps; + double tmin, tmax, *ts, *pt; - Py_ssize_t current_pos = 0, stop_index = count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { + ps = (int32_t *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - pos_min = current_pos; - pos_max = 
current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; + pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - } - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - *outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } + + current_pos++; + + if ((i == stop_index) && (j == last)) + break; + } + } + + if (pos_min < pos_max) + { + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; + } + else + { + *outdata++ = pos_max; + *outdata++ = pos_min; + + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + *pt++ = tmin; } + } } void positions_long_long(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, PyObject *result, int32_t step, int32_t count, int32_t last) { - int64_t min, max, *indata; - int32_t *outdata; - Py_ssize_t pos_min = 0, pos_max = 0; + int64_t min, max, *indata; + int32_t *outdata; + Py_ssize_t pos_min = 0, pos_max = 0; - indata = (int64_t *)PyArray_GETPTR1((PyArrayObject *)samples, 0); 
- outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + indata = (int64_t *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); - int64_t *ps; - double tmin, tmax, *ts, *pt; + int64_t *ps; + double tmin, tmax, *ts, *pt; - ps = (int64_t *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - - Py_ssize_t current_pos = 0, stop_index = count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { + ps = (int64_t *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - pos_min = current_pos; - pos_max = current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; + pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - } - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - *outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = 
min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } + + current_pos++; + + if ((i == stop_index) && (j == last)) + break; + } + } + + if (pos_min < pos_max) + { + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; + } + else + { + *outdata++ = pos_max; + *outdata++ = pos_min; + + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + *pt++ = tmin; } + } } void positions_unsigned_char(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, PyObject *result, int32_t step, int32_t count, int32_t last) { - unsigned char min, max, *indata; - int32_t *outdata; - Py_ssize_t pos_min = 0, pos_max = 0; - - indata = (unsigned char *)PyArray_GETPTR1((PyArrayObject *)samples, 0); - outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + unsigned char min, max, *indata; + int32_t *outdata; + Py_ssize_t pos_min = 0, pos_max = 0; - unsigned char *ps; - double tmin, tmax, *ts, *pt; + indata = (unsigned char *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); - ps = (unsigned char *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); + unsigned char *ps; + double tmin, tmax, *ts, *pt; - Py_ssize_t current_pos = 0, stop_index = count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { + ps = (unsigned char *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - pos_min = current_pos; - pos_max = current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; 
+ pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - } - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - *outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } + + current_pos++; + + if ((i == stop_index) && (j == last)) + break; + } + } + + if (pos_min < pos_max) + { + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; } + else + { + *outdata++ = pos_max; + *outdata++ = pos_min; + + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + *pt++ = tmin; + } + } } void positions_unsigned_short(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, PyObject *result, int32_t step, int32_t count, int32_t last) { - unsigned short min, max, *indata; - int32_t *outdata; - Py_ssize_t pos_min = 0, pos_max = 0; + unsigned short min, max, *indata; + int32_t *outdata; + Py_ssize_t pos_min = 0, pos_max = 0; - indata = (unsigned short *)PyArray_GETPTR1((PyArrayObject *)samples, 0); - outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + indata = (unsigned short *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject 
*)result, 0); - unsigned short *ps; - double tmin, tmax, *ts, *pt; + unsigned short *ps; + double tmin, tmax, *ts, *pt; - ps = (unsigned short *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); + ps = (unsigned short *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - Py_ssize_t current_pos = 0, stop_index = count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { - - pos_min = current_pos; - pos_max = current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; + pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - } - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - *outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } + + current_pos++; + + if ((i == stop_index) && (j == last)) + break; + } + } + + 
if (pos_min < pos_max) + { + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; + } + else + { + *outdata++ = pos_max; + *outdata++ = pos_min; + + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + *pt++ = tmin; } + } } void positions_unsigned_long(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, PyObject *result, int32_t step, int32_t count, int32_t last) { - uint32_t min, max, *indata; - int32_t *outdata; - Py_ssize_t pos_min = 0, pos_max = 0; - - indata = (uint32_t *)PyArray_GETPTR1((PyArrayObject *)samples, 0); - outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + uint32_t min, max, *indata; + int32_t *outdata; + Py_ssize_t pos_min = 0, pos_max = 0; - uint32_t *ps; - double tmin, tmax, *ts, *pt; + indata = (uint32_t *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); - ps = (uint32_t *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); + uint32_t *ps; + double tmin, tmax, *ts, *pt; - Py_ssize_t current_pos = 0, stop_index = count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { + ps = (uint32_t *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - pos_min = current_pos; - pos_max = current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; + pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, 
ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - } - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - *outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } + + current_pos++; + + if ((i == stop_index) && (j == last)) + break; + } + } + + if (pos_min < pos_max) + { + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; + } + else + { + *outdata++ = pos_max; + *outdata++ = pos_min; + + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + *pt++ = tmin; } + } } void positions_unsigned_long_long(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, PyObject *result, int32_t step, int32_t count, int32_t last) { - uint64_t min, max, *indata; - int32_t *outdata; - Py_ssize_t pos_min = 0, pos_max = 0; + uint64_t min, max, *indata; + int32_t *outdata; + Py_ssize_t pos_min = 0, pos_max = 0; - indata = (uint64_t *)PyArray_GETPTR1((PyArrayObject *)samples, 0); - outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + indata = (uint64_t *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); - uint64_t *ps; - double tmin, tmax, *ts, *pt; + uint64_t *ps; + double tmin, tmax, *ts, *pt; - ps = (uint64_t *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject 
*)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - - Py_ssize_t current_pos = 0, stop_index = count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { + ps = (uint64_t *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - pos_min = current_pos; - pos_max = current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; + pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - } - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - *outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } + + current_pos++; + + if ((i == stop_index) && (j == last)) + break; + } + } + + if (pos_min < pos_max) + { + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; + } + else + { + *outdata++ = pos_max; + *outdata++ = pos_min; + + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + 
*pt++ = tmin; } + } } void positions_float(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, PyObject *result, int32_t step, int32_t count, int32_t last) { - float min, max, *indata = NULL; - int32_t *outdata = NULL; - Py_ssize_t pos_min = 0, pos_max = 0; - - indata = (float *)PyArray_GETPTR1((PyArrayObject *)samples, 0); - outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + float min, max, *indata = NULL; + int32_t *outdata = NULL; + Py_ssize_t pos_min = 0, pos_max = 0; - float *ps; - double tmin, tmax, *ts, *pt; + indata = (float *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); - ps = (float *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); + float *ps; + double tmin, tmax, *ts, *pt; - Py_ssize_t current_pos = 0, stop_index = count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { + ps = (float *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - pos_min = current_pos; - pos_max = current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; + pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - 
} - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - *outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } + + current_pos++; + + if ((i == stop_index) && (j == last)) + break; + } } + + if (pos_min < pos_max) + { + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; + } + else + { + *outdata++ = pos_max; + *outdata++ = pos_min; + + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + *pt++ = tmin; + } + } } void positions_double(PyObject *samples, PyObject *timestamps, PyObject *plot_samples, PyObject *plot_timestamps, PyObject *result, int32_t step, int32_t count, int32_t last) { - double min, max, *indata = NULL; - int32_t *outdata = NULL; - Py_ssize_t pos_min = 0, pos_max = 0; + double min, max, *indata = NULL; + int32_t *outdata = NULL; + Py_ssize_t pos_min = 0, pos_max = 0; - indata = (double *)PyArray_GETPTR1((PyArrayObject *)samples, 0); - outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); + indata = (double *)PyArray_GETPTR1((PyArrayObject *)samples, 0); + outdata = (int32_t *)PyArray_GETPTR1((PyArrayObject *)result, 0); - double *ps = NULL; - double tmin, tmax, *ts = NULL, *pt = NULL; + double *ps = NULL; + double tmin, tmax, *ts = NULL, *pt = NULL; - ps = (double *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); - pt = (double *)PyArray_GETPTR1((PyArrayObject *)plot_timestamps, 0); - ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); + ps = (double *)PyArray_GETPTR1((PyArrayObject *)plot_samples, 0); + pt = (double *)PyArray_GETPTR1((PyArrayObject 
*)plot_timestamps, 0); + ts = (double *)PyArray_GETPTR1((PyArrayObject *)timestamps, 0); - Py_ssize_t current_pos = 0, stop_index = count - 1; - for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) - { - - pos_min = current_pos; - pos_max = current_pos; - min = max = *indata; - indata++; - current_pos++; + Py_ssize_t current_pos = 0, stop_index = count - 1; + for (Py_ssize_t i = 0; i < (Py_ssize_t)count; i++) + { - tmin = tmax = *ts; - ts++; + pos_min = current_pos; + pos_max = current_pos; + min = max = *indata; + indata++; + current_pos++; - if ((i != stop_index) || (0 != last)) - { + tmin = tmax = *ts; + ts++; - for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) - { - if (*indata < min) - { - min = *indata; - pos_min = current_pos; - tmin = *ts; - } - else if (*indata > max) - { - max = *indata; - pos_max = current_pos; - tmax = *ts; - } - - current_pos++; - - if ((i == stop_index) && (j == last)) - break; - } - } + if ((i != stop_index) || (0 != last)) + { - if (pos_min < pos_max) + for (Py_ssize_t j = 1; j < step; j++, indata++, ts++) + { + if (*indata < min) { - *outdata++ = pos_min; - *outdata++ = pos_max; - - *ps++ = min; - *pt++ = tmin; - *ps++ = max; - *pt++ = tmax; + min = *indata; + pos_min = current_pos; + tmin = *ts; } - else + else if (*indata > max) { - *outdata++ = pos_max; - *outdata++ = pos_min; - - *ps++ = max; - *pt++ = tmax; - *ps++ = min; - *pt++ = tmin; + max = *indata; + pos_max = current_pos; + tmax = *ts; } - } -} -static PyObject *positions(PyObject *self, PyObject *args) -{ - int32_t count, step, last; - unsigned char itemsize; - char *kind; - Py_ssize_t _size; + current_pos++; - PyObject *samples, *timestamps, *result, *step_obj, *count_obj, *last_obj, *plot_samples, *plot_timestamps; + if ((i == stop_index) && (j == last)) + break; + } + } - if (!PyArg_ParseTuple(args, "OOOOOOOOs#B", - &samples, ×tamps, &plot_samples, &plot_timestamps, &result, &step_obj, &count_obj, &last_obj, &kind, &_size, &itemsize)) + if (pos_min < pos_max) { - 
return NULL; + *outdata++ = pos_min; + *outdata++ = pos_max; + + *ps++ = min; + *pt++ = tmin; + *ps++ = max; + *pt++ = tmax; } else { - count = PyLong_AsLong(count_obj); - step = PyLong_AsLong(step_obj); - last = PyLong_AsLong(last_obj) - 1; + *outdata++ = pos_max; + *outdata++ = pos_min; - if (kind[0] == 'u') - { - if (itemsize == 1) - positions_unsigned_char(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - else if (itemsize == 2) - positions_unsigned_short(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - else if (itemsize == 4) - positions_unsigned_long(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - else - positions_unsigned_long_long(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - } - else if (kind[0] == 'i') - { - if (itemsize == 1) - positions_char(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - else if (itemsize == 2) - positions_short(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - else if (itemsize == 4) - positions_long(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - else - positions_long_long(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - } - else if (kind[0] == 'f') - { - if (itemsize == 4) - positions_float(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - else - positions_double(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); - } + *ps++ = max; + *pt++ = tmax; + *ps++ = min; + *pt++ = tmin; + } + } +} - Py_INCREF(Py_None); - return Py_None; +static PyObject *positions(PyObject *self, PyObject *args) +{ + int32_t count, step, last; + unsigned char itemsize; + char *kind; + Py_ssize_t _size; + + PyObject *samples, *timestamps, *result, *step_obj, *count_obj, *last_obj, *plot_samples, *plot_timestamps; + + if 
(!PyArg_ParseTuple(args, "OOOOOOOOs#B", + &samples, ×tamps, &plot_samples, &plot_timestamps, &result, &step_obj, &count_obj, &last_obj, &kind, &_size, &itemsize)) + { + return NULL; + } + else + { + count = PyLong_AsLong(count_obj); + step = PyLong_AsLong(step_obj); + last = PyLong_AsLong(last_obj) - 1; + + if (kind[0] == 'u') + { + if (itemsize == 1) + positions_unsigned_char(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); + else if (itemsize == 2) + positions_unsigned_short(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); + else if (itemsize == 4) + positions_unsigned_long(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); + else + positions_unsigned_long_long(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); + } + else if (kind[0] == 'i') + { + if (itemsize == 1) + positions_char(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); + else if (itemsize == 2) + positions_short(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); + else if (itemsize == 4) + positions_long(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); + else + positions_long_long(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); + } + else if (kind[0] == 'f') + { + if (itemsize == 4) + positions_float(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); + else + positions_double(samples, timestamps, plot_samples, plot_timestamps, result, step, count, last); } + + Py_INCREF(Py_None); + return Py_None; + } } static PyObject *get_channel_raw_bytes(PyObject *self, PyObject *args) { - Py_ssize_t count, size, actual_byte_count, delta; - PyObject *data_block, *out; + Py_ssize_t count, size, actual_byte_count, delta; + PyObject *data_block, *out; + + Py_ssize_t record_size, byte_offset, byte_count; - Py_ssize_t record_size, byte_offset, 
byte_count; + char *inptr, *outptr; - char *inptr, *outptr; + if (!PyArg_ParseTuple(args, "Onnn", &data_block, &record_size, &byte_offset, &byte_count)) + { + return 0; + } + else + { + if (PyBytes_Check(data_block)) { + size = PyBytes_Size(data_block); + inptr = PyBytes_AsString(data_block); + } + else { + size = PyByteArray_Size(data_block); + inptr = PyByteArray_AsString(data_block); + } - if (!PyArg_ParseTuple(args, "Onnn", &data_block, &record_size, &byte_offset, &byte_count)) + if (!record_size) { - return 0; + out = PyByteArray_FromStringAndSize(NULL, 0); } - else + else if (record_size < byte_offset + byte_count) { - if (PyBytes_Check(data_block)) { - size = PyBytes_Size(data_block); - inptr = PyBytes_AsString(data_block); - } - else { - size = PyByteArray_Size(data_block); - inptr = PyByteArray_AsString(data_block); - } - - if (!record_size) - { - out = PyByteArray_FromStringAndSize(NULL, 0); - } - else if (record_size < byte_offset + byte_count) - { - delta = byte_offset + byte_count - record_size; - actual_byte_count = record_size - byte_offset; + delta = byte_offset + byte_count - record_size; + actual_byte_count = record_size - byte_offset; - count = size / record_size; + count = size / record_size; - out = PyByteArray_FromStringAndSize(NULL, count * byte_count); - outptr = PyByteArray_AsString(out); + out = PyByteArray_FromStringAndSize(NULL, count * byte_count); + outptr = PyByteArray_AsString(out); - inptr += byte_offset; + inptr += byte_offset; - for (Py_ssize_t i = 0; i < count; i++) - { - for (Py_ssize_t j = 0; j < actual_byte_count; j++) - *outptr++ = *inptr++; + for (Py_ssize_t i = 0; i < count; i++) + { + for (Py_ssize_t j = 0; j < actual_byte_count; j++) + *outptr++ = *inptr++; - inptr += record_size - actual_byte_count; - for (Py_ssize_t j = 0; j < delta; j++) - { - *outptr++ = '\0'; - } - } - } - else + inptr += record_size - actual_byte_count; + for (Py_ssize_t j = 0; j < delta; j++) { - count = size / record_size; + *outptr++ = '\0'; + } 
+ } + } + else + { + count = size / record_size; - out = PyByteArray_FromStringAndSize(NULL, count * byte_count); - outptr = PyByteArray_AsString(out); + out = PyByteArray_FromStringAndSize(NULL, count * byte_count); + outptr = PyByteArray_AsString(out); - inptr += byte_offset; + inptr += byte_offset; - delta = record_size - byte_count; + delta = record_size - byte_count; - for (Py_ssize_t i = 0; i < count; i++) - { - for (Py_ssize_t j = 0; j < byte_count; j++) - *outptr++ = *inptr++; - inptr += delta; - } - } + for (Py_ssize_t i = 0; i < count; i++) + { + for (Py_ssize_t j = 0; j < byte_count; j++) + *outptr++ = *inptr++; + inptr += delta; + } + } - data_block = NULL; + data_block = NULL; - return out; - } + return out; + } } static PyObject *get_invalidation_bits_array(PyObject *self, PyObject *args) { - Py_ssize_t count, size, actual_byte_count, delta, invalidation_pos, invalidation_size; - PyObject *data_block, *out; + Py_ssize_t count, size, actual_byte_count, delta, invalidation_pos, invalidation_size; + PyObject *data_block, *out; - Py_ssize_t record_size, byte_offset, byte_count; + Py_ssize_t record_size, byte_offset, byte_count; - uint8_t mask, *inptr, *outptr; + uint8_t mask, *inptr, *outptr; - if (!PyArg_ParseTuple(args, "Onn", &data_block, &invalidation_size, &invalidation_pos)) - { - return 0; + if (!PyArg_ParseTuple(args, "Onn", &data_block, &invalidation_size, &invalidation_pos)) + { + return 0; + } + else + { + if (PyBytes_Check(data_block)) { + size = PyBytes_Size(data_block); + inptr = (uint8_t *)PyBytes_AsString(data_block); + } + else { + size = PyByteArray_Size(data_block); + inptr = (uint8_t *)PyByteArray_AsString(data_block); } - else - { - if (PyBytes_Check(data_block)) { - size = PyBytes_Size(data_block); - inptr = (uint8_t *)PyBytes_AsString(data_block); - } - else { - size = PyByteArray_Size(data_block); - inptr = (uint8_t *)PyByteArray_AsString(data_block); - } - - count = size / invalidation_size; - byte_offset = invalidation_pos / 8; - 
mask = (uint8_t ) (1 << (invalidation_pos % 8)); - inptr += byte_offset; + count = size / invalidation_size; + byte_offset = invalidation_pos / 8; + mask = (uint8_t ) (1 << (invalidation_pos % 8)); - npy_intp dims[1]; - dims[0] = count; - out = (PyArrayObject *)PyArray_EMPTY(1, dims, NPY_BOOL, 0); - outptr = (uint8_t *)PyArray_GETPTR1(out, 0); + inptr += byte_offset; - for (int i=0; icount; - thread_idx = indata->idx; - thread_count = indata->thread_count; - data = indata->data; - for (Py_ssize_t i = 0; irecord_size < data->byte_offset + data->byte_count) - { - inptr = data->inptr; - delta = data->byte_offset + data->byte_count - data->record_size; - actual_byte_count = data->record_size - data->byte_offset; - - count = data->cycles; - - outptr = data->outptr; - inptr += data->byte_offset; - - for (Py_ssize_t i = 0; i < count; i++) - { - for (Py_ssize_t j = 0; j < actual_byte_count; j++) - *outptr++ = *inptr++; - - inptr += data->record_size - actual_byte_count; - for (Py_ssize_t j = 0; j < delta; j++) - { - *outptr++ = 0; - } - } - } - else + Py_ssize_t count, actual_byte_count, delta, thread_count; + PMYDATA data; + PMyChannelInfo indata; + indata = (PMyChannelInfo) lpParam; + + Py_ssize_t signal_count, thread_idx; + signal_count = indata->count; + thread_idx = indata->idx; + thread_count = indata->thread_count; + data = indata->data; + for (Py_ssize_t i = 0; irecord_size < data->byte_offset + data->byte_count) + { + inptr = data->inptr; + delta = data->byte_offset + data->byte_count - data->record_size; + actual_byte_count = data->record_size - data->byte_offset; + + count = data->cycles; + + outptr = data->outptr; + inptr += data->byte_offset; + + for (Py_ssize_t i = 0; i < count; i++) + { + for (Py_ssize_t j = 0; j < actual_byte_count; j++) + *outptr++ = *inptr++; + + inptr += data->record_size - actual_byte_count; + for (Py_ssize_t j = 0; j < delta; j++) { - inptr = data->inptr; - count = data->cycles; - outptr = data->outptr; - inptr += data->byte_offset; - 
- delta = data->record_size - data->byte_count; - - for (Py_ssize_t i = 0; i < count; i++) - { - for (Py_ssize_t j = 0; j < data->byte_count; j++) - *outptr++ = *inptr++; - inptr += delta; - } + *outptr++ = 0; } - - for (Py_ssize_t i = 0; iinptr; + count = data->cycles; + outptr = data->outptr; + inptr += data->byte_offset; + + delta = data->record_size - data->byte_count; + + for (Py_ssize_t i = 0; i < count; i++) + { + for (Py_ssize_t j = 0; j < data->byte_count; j++) + *outptr++ = *inptr++; + inptr += delta; + } } - return 0; + for (Py_ssize_t i = 0; icount; - thread_idx = indata->idx; - data = indata->data; - thread_count= indata->thread_count; - for (Py_ssize_t i = 0; irecord_size; - step = record_size - data->byte_count; - cycles = data->cycles; - byte_count = data->byte_count; - inptr = data->inptr; - - if (!record_size) continue; - - outptr = data->outptr + data->byte_offset; - - for (Py_ssize_t i=0; i count; + thread_idx = indata->idx; + data = indata->data; + thread_count= indata->thread_count; + for (Py_ssize_t i = 0; irecord_size; + step = record_size - data->byte_count; + cycles = data->cycles; + byte_count = data->byte_count; + inptr = data->inptr; + + if (!record_size) continue; + + outptr = data->outptr + data->byte_offset; + + for (Py_ssize_t i=0; i size) size = current_size; + } - for (j = 0; j < (Py_ssize_t)cols; j++) + return PyLong_FromSsize_t(size); + } +} + + +typedef struct InfoBlock { + int64_t address; + int64_t original_size; + int64_t compressed_size; + int64_t block_limit; + Py_ssize_t param; + Py_ssize_t block_type; + Py_ssize_t idx; + Py_ssize_t count; + +} InfoBlock, *PtrInfoBlock; + + +typedef struct SignalInfo { + int64_t byte_offset; + int64_t byte_count; + int32_t invalidation_bit_position; + uint8_t *data; + uint8_t *data_position; + PyObject *obj; +} SignalInfo, *PtrSignalInfo; + + +typedef struct ProcessesingBlock { + uint8_t stop; + uint8_t ** outptr; + uint8_t * inptr; + int64_t cycles; + PtrInfoBlock block_info; + struct 
SignalInfo *signals; + Py_ssize_t signal_count; + Py_ssize_t record_size; + Py_ssize_t idx; +#if defined(_WIN32) + HANDLE bytes_ready; + HANDLE block_ready; +#else + pthread_cond_t bytes_ready; + pthread_cond_t block_ready; + pthread_mutex_t bytes_ready_lock; + pthread_mutex_t block_ready_lock; +#endif +} ProcessesingBlock, *PtrProcessesingBlock; + + +void * get_channel_raw_bytes_complete_C(void *lpParam ) +{ + Py_ssize_t count, byte_count, byte_offset, delta, thread_count, param, block_type; + int64_t original_size, compressed_size, block_limit, cycles; + PtrProcessesingBlock thread_info; + thread_info = (PtrProcessesingBlock) lpParam; + PtrInfoBlock block_info; + + Py_ssize_t signal_count, thread_idx, record_size, in_size, cols, lines; + + record_size = thread_info->record_size; + + int result; + + uint8_t *outptr, *inptr, *write; + uint8_t *pUncomp, *read; + + while (1) { +#if defined(_WIN32) + WaitForSingleObject(thread_info->block_ready, INFINITE); + ResetEvent(thread_info->block_ready); + if (thread_info->stop) break; +#else + pthread_mutex_lock(&thread_info->block_ready_lock); + pthread_cond_wait(&thread_info->block_ready, &thread_info->block_ready_lock); + pthread_mutex_unlock(&thread_info->block_ready_lock); +#endif + + inptr = thread_info->inptr; + original_size = thread_info->block_info->original_size; + compressed_size = thread_info->block_info->compressed_size; + param = thread_info->block_info->param; + block_type = thread_info->block_info->block_type; + + cols = param; + lines = original_size / cols; + + // decompress + count = original_size / record_size; + + pUncomp = (uint8_t *) malloc(original_size); + struct libdeflate_decompressor *decompressor = libdeflate_alloc_decompressor(); + libdeflate_zlib_decompress(decompressor, + inptr, compressed_size, + pUncomp, original_size, + NULL); + libdeflate_free_decompressor(decompressor); + + // reverse transposition + if (block_type == 2) { + read = pUncomp; + outptr = (uint8_t *) malloc(original_size); + 
+ for (int j = 0; j < (Py_ssize_t)cols; j++) + { + write = outptr + j; + for (int i = 0; i < (Py_ssize_t)lines; i++) { - write = write_original + j; - for (i = 0; i < (Py_ssize_t)lines; i++) - { - *write = *read++; - write += cols; - } + *write = *read++; + write += cols; } + } + free(pUncomp); + pUncomp = outptr; + } - if (count) - memcpy(write_original + (Py_ssize_t)(lines * cols), read, (Py_ssize_t)count); - return values; + if (thread_info->block_info->block_limit >= 0) { + cycles = thread_info->block_info->block_limit / record_size ; + thread_info->cycles = cycles; + } + else { + cycles = count; + thread_info->cycles = count; } + + for (int i =0; isignal_count; i++) { + byte_offset = thread_info->signals[i].byte_offset; + byte_count = thread_info->signals[i].byte_count; + + outptr = (uint8_t *) malloc(cycles * byte_count); + + read = pUncomp + byte_offset; + write = outptr; + + for (Py_ssize_t j = 0; j < cycles; j++) + { + memcpy(write, read, byte_count); + write += byte_count; + read += record_size; + } + thread_info->outptr[i] = outptr; + } + + free(pUncomp); + +#if defined(_WIN32) + SetEvent(thread_info->bytes_ready); +#else + pthread_mutex_lock(&thread_info->bytes_ready_lock); + pthread_cond_signal(&thread_info->bytes_ready); + pthread_mutex_unlock(&thread_info->bytes_ready_lock); +#endif + + } + return 0; } -static PyObject *bytes_dtype_size(PyObject *self, PyObject *args) + +static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) { - Py_ssize_t i = 0, j = 0; - Py_ssize_t count, size = 0, current_size=0; - PyObject *data, *values, **pointer; - char *read, *write_original, *write; - bool all_bytes = true; + Py_ssize_t info_count, signal_count, signal_and_invalidation_count, thread_count=11; + PyObject *data_blocks_info, *signals, *out = NULL, *item, *ref, *obj; + + char *outptr, *file_name; + char *read_pos = NULL, *write_pos = NULL; + Py_ssize_t position = 0, record_size = 0, + cycles, step = 0, invalidation_bytes; + Py_ssize_t 
isize = 0, offset = 0; + int is_list; + int64_t byte_offset, byte_count, new_cycles; + int32_t invalidation_bit_position; + + PtrInfoBlock block_info; + InfoBlock info_block; + PtrProcessesingBlock thread_info; + PtrProcessesingBlock thread; + + FILE *fptr; + uint8_t *buffer; + int result; + + if (!PyArg_ParseTuple(args, "OOsnnns|nn", + &data_blocks_info, &signals, &file_name, &cycles, &record_size, &invalidation_bytes, + &thread_count)) + { + return NULL; + } + else + { + fptr = fopen(file_name,"rb"); + +#if defined(_WIN32) + HANDLE *hThreads, *block_ready, *bytes_ready; + DWORD *dwThreadIdArray; + hThreads = (HANDLE *) malloc(sizeof(HANDLE) * thread_count); + dwThreadIdArray = (DWORD *) malloc(sizeof(DWORD) * thread_count); + block_ready = (HANDLE *) malloc(sizeof(HANDLE) * thread_count); + bytes_ready = (HANDLE *) malloc(sizeof(HANDLE) * thread_count); +#else + pthread_t *dwThreadIdArray = (pthread_t *) malloc(sizeof(pthread_t) * thread_count); - if (!PyArg_ParseTuple(args, "O", &data)) - { - return NULL; + pthread_mutex_t *bytes_ready_locks, *block_ready_locks; // Declare mutex + pthread_cond_t *block_ready, *bytes_ready; + + block_ready = (pthread_cond_t *) malloc(sizeof(pthread_cond_t) * thread_count); + bytes_ready = (pthread_cond_t *) malloc(sizeof(pthread_cond_t) * thread_count); + bytes_ready_locks = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t) * thread_count); + block_ready_locks = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t) * thread_count); +#endif + + PtrSignalInfo signal_info; + + is_list = PyList_Check(signals); + if (is_list) { + signal_count = PyList_Size(signals); } - else + else { + signal_count = PyTuple_Size(signals); + } + + if (invalidation_bytes) { + signal_and_invalidation_count = signal_count +1; + signal_info = (PtrSignalInfo) malloc(sizeof(SignalInfo) * (signal_count + 1)); + } + else { + signal_and_invalidation_count = signal_count; + signal_info = (PtrSignalInfo) malloc(sizeof(SignalInfo) * signal_count); + } + for (int 
i=0; i size) size = current_size; + if (info_count < thread_count) { + thread_count = info_count; + } + block_info = (PtrInfoBlock) malloc(sizeof(InfoBlock) * info_count); + thread_info = (PtrProcessesingBlock) malloc(sizeof(ProcessesingBlock) * thread_count); + + for (int i=0; i= thread_count) { +#if defined(_WIN32) + WaitForSingleObject(bytes_ready[position], INFINITE); + ResetEvent(bytes_ready[position]); +#else + pthread_mutex_lock(&bytes_ready_locks[position]); + pthread_cond_wait(&bytes_ready[position], &bytes_ready_locks[position]); + pthread_mutex_unlock(&bytes_ready_locks[position]); +#endif + new_cycles = thread->cycles; + thread->cycles = 0; + for (int j=0; joutptr[j], signal_info[j].byte_count * new_cycles); + signal_info[j].data_position += signal_info[j].byte_count * new_cycles; + free(thread->outptr[j]); + } + free(thread->inptr); + } + + thread->block_info = &block_info[i]; + buffer = (uint8_t *) malloc(block_info[i].compressed_size); + _fseeki64(fptr, block_info[i].address, 0); + result = fread(buffer, 1, block_info[i].compressed_size, fptr); + thread->inptr = buffer; + +#if defined(_WIN32) + SetEvent(block_ready[position]); +#else + pthread_mutex_lock(&block_ready_locks[position]); + pthread_cond_signal(&block_ready[position]); + pthread_mutex_unlock(&block_ready_locks[position]); +#endif + + position++; + if (position == thread_count) position = 0; + + } + + for (int i=0; icycles; + thread->cycles = 0; + for (int j=0; joutptr[j], signal_info[j].byte_count * new_cycles); + signal_info[j].data_position += signal_info[j].byte_count * new_cycles; + free(thread->outptr[j]); + } + + thread->stop = 1; + +#if defined(_WIN32) + SetEvent(block_ready[position]); +#else + pthread_mutex_lock(&block_ready_locks[position]); + pthread_cond_signal(&block_ready[position]); + pthread_mutex_unlock(&block_ready_locks[position]); +#endif + + position++; + if (position == thread_count) position = 0; + } + +#if defined(_WIN32) + WaitForMultipleObjects(thread_count, 
hThreads, true, INFINITE); + for (int i=0; i< thread_count; i++) { + CloseHandle(hThreads[i]); + CloseHandle(block_ready[i]); + CloseHandle(bytes_ready[i]); + } +#else + for (int i=0; i< thread_count; i++) { + pthread_join(dwThreadIdArray[i], NULL); + } +#endif + + for (int i=0; i Date: Fri, 20 Dec 2024 13:09:04 +0200 Subject: [PATCH 24/60] 64 bit fseek and ftell --- src/asammdf/blocks/cutils.c | 48 +++++++++++++++++++----------------- src/asammdf/blocks/mdf_v4.py | 6 +++++ 2 files changed, 31 insertions(+), 23 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index ab5cc7b3b..32b18188c 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -9,16 +9,18 @@ #include #include #include -#include "data_block_utilities.c" #if defined(_WIN32) -#include -#include + #include + #include + #define FSEEK64(file, address, whence) _fseeki64((file), (address), (whence)) + #define FTELL64(file) _ftelli64(file) #else -#include -#include -#define Sleep(x) usleep((int)(1000 * (x))) -#include + #include + #include + #define Sleep(x) usleep((int)(1000 * (x))) + #define FSEEK64(file, address, whence) fseek0((file), (address), (whence)) + #define FTELL64(file) ftello(file) #endif #define MAX(a,b) ((a) > (b) ? 
(a) : (b)) @@ -2040,9 +2042,9 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) pUncomp = (uint8_t *) malloc(original_size); struct libdeflate_decompressor *decompressor = libdeflate_alloc_decompressor(); libdeflate_zlib_decompress(decompressor, - inptr, compressed_size, - pUncomp, original_size, - NULL); + inptr, compressed_size, + pUncomp, original_size, + NULL); libdeflate_free_decompressor(decompressor); // reverse transposition @@ -2090,13 +2092,13 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) } thread_info->outptr[i] = outptr; } - + free(pUncomp); - + #if defined(_WIN32) SetEvent(thread_info->bytes_ready); #else - pthread_mutex_lock(&thread_info->bytes_ready_lock); + pthread_mutex_lock(&thread_info->bytes_ready_lock); pthread_cond_signal(&thread_info->bytes_ready); pthread_mutex_unlock(&thread_info->bytes_ready_lock); #endif @@ -2342,7 +2344,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) ); } #else - for (int i=0; i< thread_count; i++) { + for (int i=0; i< thread_count; i++) { pthread_create(&(dwThreadIdArray[i]), NULL, get_channel_raw_bytes_complete_C, &thread_info[i]); } #endif @@ -2360,7 +2362,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) WaitForSingleObject(bytes_ready[position], INFINITE); ResetEvent(bytes_ready[position]); #else - pthread_mutex_lock(&bytes_ready_locks[position]); + pthread_mutex_lock(&bytes_ready_locks[position]); pthread_cond_wait(&bytes_ready[position], &bytes_ready_locks[position]); pthread_mutex_unlock(&bytes_ready_locks[position]); #endif @@ -2376,14 +2378,14 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) thread->block_info = &block_info[i]; buffer = (uint8_t *) malloc(block_info[i].compressed_size); - _fseeki64(fptr, block_info[i].address, 0); + FSEEK64(fptr, block_info[i].address, 0); result = fread(buffer, 1, block_info[i].compressed_size, fptr); thread->inptr = buffer; - + #if defined(_WIN32) 
SetEvent(block_ready[position]); #else - pthread_mutex_lock(&block_ready_locks[position]); + pthread_mutex_lock(&block_ready_locks[position]); pthread_cond_signal(&block_ready[position]); pthread_mutex_unlock(&block_ready_locks[position]); #endif @@ -2400,7 +2402,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) WaitForSingleObject(bytes_ready[position], INFINITE); ResetEvent(bytes_ready[position]); #else - pthread_mutex_lock(&bytes_ready_locks[position]); + pthread_mutex_lock(&bytes_ready_locks[position]); pthread_cond_wait(&bytes_ready[position], &bytes_ready_locks[position]); pthread_mutex_unlock(&bytes_ready_locks[position]); #endif @@ -2417,7 +2419,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) #if defined(_WIN32) SetEvent(block_ready[position]); #else - pthread_mutex_lock(&block_ready_locks[position]); + pthread_mutex_lock(&block_ready_locks[position]); pthread_cond_signal(&block_ready[position]); pthread_mutex_unlock(&block_ready_locks[position]); #endif @@ -2434,7 +2436,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) CloseHandle(bytes_ready[i]); } #else - for (int i=0; i< thread_count; i++) { + for (int i=0; i< thread_count; i++) { pthread_join(dwThreadIdArray[i], NULL); } #endif @@ -2472,9 +2474,9 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) free(bytes_ready); free(dwThreadIdArray); #if defined(_WIN32) - free(hThreads); + free(hThreads); #else - free(bytes_ready_locks); + free(bytes_ready_locks); free(block_ready_locks); #endif diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 264c9446e..7fbecc758 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -1884,6 +1884,8 @@ def _get_data_blocks_info( else: READ_CHUNK_SIZE = 32 * 1024 * 1024 + READ_CHUNK_SIZE = min(READ_CHUNK_SIZE, total_size) + if mapped: if address: id_string, block_len = 
COMMON_SHORT_uf(stream, address) @@ -1892,6 +1894,8 @@ def _get_data_blocks_info( if id_string == block_type: size = block_len - 24 if size: + size = min(size, total_size) + address = address + COMMON_SIZE # split the DTBLOCK into chucks of up to 32MB @@ -1966,6 +1970,8 @@ def _get_data_blocks_info( if id_string == block_type: size = block_len - 24 if size: + size = min(size, total_size) + addr += COMMON_SIZE # split the DTBLOCK into chucks of up to 32MB From 9ec23428465a36e562e7534d0f4f602795fb0817 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 13:26:01 +0200 Subject: [PATCH 25/60] fix free and arg parsing --- src/asammdf/blocks/cutils.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 32b18188c..a9ff63ae8 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -11,16 +11,16 @@ #include #if defined(_WIN32) - #include - #include - #define FSEEK64(file, address, whence) _fseeki64((file), (address), (whence)) - #define FTELL64(file) _ftelli64(file) +#include +#include +#define FSEEK64(file, address, whence) _fseeki64((file), (address), (whence)) +#define FTELL64(file) _ftelli64(file) #else - #include - #include - #define Sleep(x) usleep((int)(1000 * (x))) - #define FSEEK64(file, address, whence) fseek0((file), (address), (whence)) - #define FTELL64(file) ftello(file) +#include +#include +#define Sleep(x) usleep((int)(1000 * (x))) +#define FSEEK64(file, address, whence) fseek0((file), (address), (whence)) +#define FTELL64(file) ftello(file) #endif #define MAX(a,b) ((a) > (b) ? 
(a) : (b)) @@ -2131,7 +2131,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) uint8_t *buffer; int result; - if (!PyArg_ParseTuple(args, "OOsnnns|nn", + if (!PyArg_ParseTuple(args, "OOsnnn|n", &data_blocks_info, &signals, &file_name, &cycles, &record_size, &invalidation_bytes, &thread_count)) { @@ -2448,6 +2448,8 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) } fclose(fptr); + + printf("tuples\n"); out = PyTuple_New(signal_count); @@ -2469,10 +2471,10 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) } free(signal_info); - free(hThreads); free(block_ready); free(bytes_ready); free(dwThreadIdArray); + #if defined(_WIN32) free(hThreads); #else From 2e43ccc77463c8e926f0d79677d79504b18307f7 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 13:44:10 +0200 Subject: [PATCH 26/60] submodule --- .github/workflows/wheels.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index bc48423b5..868fd5219 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -13,6 +13,9 @@ jobs: steps: - uses: actions/checkout@v4 + with: + submodules: 'true' + token: ${{ secrets.submodules_PAT }} # Used to host cibuildwheel - uses: actions/setup-python@v5 From b8fea024070407dda77b3ffd4c42ff3947152721 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 13:46:26 +0200 Subject: [PATCH 27/60] recursive --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 868fd5219..af331ce77 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -14,7 +14,7 @@ jobs: steps: - uses: actions/checkout@v4 with: - submodules: 'true' + submodules: recursive token: ${{ secrets.submodules_PAT }} # Used to host cibuildwheel From bcebc7cfc7798f1e0171eff2b889c732bcd84544 
Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 13:49:43 +0200 Subject: [PATCH 28/60] submodules actions --- .github/workflows/build.yml | 3 +++ .github/workflows/main.yml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 963e65416..af7ef2a55 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,6 +12,9 @@ jobs: steps: - uses: actions/checkout@v4 + with: + submodules: recursive + token: ${{ secrets.submodules_PAT }} - name: Set up Python uses: actions/setup-python@v5 diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 48b1c8570..6524e81a3 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -17,6 +17,9 @@ jobs: steps: - uses: actions/checkout@v4 + with: + submodules: recursive + token: ${{ secrets.submodules_PAT }} - name: Set up Python uses: actions/setup-python@v5 From ef80f2189a3867eda7eab13ee7ca896bbf53fef9 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 13:55:11 +0200 Subject: [PATCH 29/60] ghilimele --- .github/workflows/build.yml | 2 +- .github/workflows/main.yml | 2 +- .github/workflows/wheels.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index af7ef2a55..1bf7b24fe 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/checkout@v4 with: - submodules: recursive + submodules: 'recursive' token: ${{ secrets.submodules_PAT }} - name: Set up Python diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 6524e81a3..3ea853b1a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -18,7 +18,7 @@ jobs: steps: - uses: actions/checkout@v4 with: - submodules: recursive + submodules: 'recursive' token: ${{ secrets.submodules_PAT }} - name: Set up Python diff --git a/.github/workflows/wheels.yml 
b/.github/workflows/wheels.yml index af331ce77..43293819b 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -14,7 +14,7 @@ jobs: steps: - uses: actions/checkout@v4 with: - submodules: recursive + submodules: 'recursive' token: ${{ secrets.submodules_PAT }} # Used to host cibuildwheel From 7ff1b68a23ce8c12257c1a09858e05566ffcdc1a Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 13:56:37 +0200 Subject: [PATCH 30/60] space --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 43293819b..7d194983f 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/checkout@v4 - with: + with: submodules: 'recursive' token: ${{ secrets.submodules_PAT }} From e9e7ef7bc28c7fd1791afe041f90e64e8f04338f Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 13:59:17 +0200 Subject: [PATCH 31/60] SUBMODULE_PAT --- .github/workflows/build.yml | 4 ++-- .github/workflows/coveralls.yml | 3 +++ .github/workflows/main.yml | 4 ++-- .github/workflows/wheels.yml | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1bf7b24fe..1cca74b96 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,9 +12,9 @@ jobs: steps: - uses: actions/checkout@v4 - with: + with: submodules: 'recursive' - token: ${{ secrets.submodules_PAT }} + token: ${{ secrets.SUBMODULE_PAT }} - name: Set up Python uses: actions/setup-python@v5 diff --git a/.github/workflows/coveralls.yml b/.github/workflows/coveralls.yml index a65d2a41c..7da631517 100644 --- a/.github/workflows/coveralls.yml +++ b/.github/workflows/coveralls.yml @@ -12,6 +12,9 @@ jobs: steps: - uses: actions/checkout@v4 + with: + submodules: 'recursive' + token: ${{ secrets.SUBMODULE_PAT }} - name: Set up Python uses: 
actions/setup-python@v5 diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 3ea853b1a..823f7afeb 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -17,9 +17,9 @@ jobs: steps: - uses: actions/checkout@v4 - with: + with: submodules: 'recursive' - token: ${{ secrets.submodules_PAT }} + token: ${{ secrets.SUBMODULE_PAT }} - name: Set up Python uses: actions/setup-python@v5 diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 7d194983f..f410bb531 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v4 with: submodules: 'recursive' - token: ${{ secrets.submodules_PAT }} + token: ${{ secrets.SUBMODULE_PAT }} # Used to host cibuildwheel - uses: actions/setup-python@v5 From c617d4e2cc6f691ce7c9065b632ae1abf2bc7134 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 14:03:33 +0200 Subject: [PATCH 32/60] fix for *nix --- src/asammdf/blocks/cutils.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index a9ff63ae8..2e7f4c270 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -2236,6 +2236,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) thread_info = (PtrProcessesingBlock) malloc(sizeof(ProcessesingBlock) * thread_count); for (int i=0; i Date: Fri, 20 Dec 2024 14:05:43 +0200 Subject: [PATCH 33/60] if defined --- src/asammdf/blocks/cutils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 2e7f4c270..3f8cd5d2e 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -2236,7 +2236,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) thread_info = (PtrProcessesingBlock) malloc(sizeof(ProcessesingBlock) * thread_count); for (int i=0; i Date: Fri, 20 
Dec 2024 14:07:56 +0200 Subject: [PATCH 34/60] fseeko --- src/asammdf/blocks/cutils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 3f8cd5d2e..421bb430e 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -19,7 +19,7 @@ #include #include #define Sleep(x) usleep((int)(1000 * (x))) -#define FSEEK64(file, address, whence) fseek0((file), (address), (whence)) +#define FSEEK64(file, address, whence) fseeko((file), (address), (whence)) #define FTELL64(file) ftello(file) #endif From bd6f5eb5057ea3939036b09b6b653de5e437e7b3 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 20 Dec 2024 14:51:53 +0200 Subject: [PATCH 35/60] set(CMAKE_POSITION_INDEPENDENT_CODE ON) --- CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index fc74cb7d3..26981b132 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,6 +11,8 @@ find_package( NumPy ) +set(CMAKE_POSITION_INDEPENDENT_CODE ON) + # Add submodule libdeflate add_subdirectory(ext/libdeflate EXCLUDE_FROM_ALL) From 4690daee6e02b0907e74ddf033f27c2e6a2ffcb8 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Sat, 21 Dec 2024 09:17:38 +0200 Subject: [PATCH 36/60] 10% better transpose --- src/asammdf/blocks/cutils.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 421bb430e..bd40b309b 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -2012,15 +2012,21 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) record_size = thread_info->record_size; int result; + clock_t start, end; + double t1=0, t2=0, t3=0; uint8_t *outptr, *inptr, *write; uint8_t *pUncomp, *read; while (1) { #if defined(_WIN32) + start = clock(); WaitForSingleObject(thread_info->block_ready, INFINITE); ResetEvent(thread_info->block_ready); + end = clock(); + t3 += end - start; if 
(thread_info->stop) break; + #else pthread_mutex_lock(&thread_info->block_ready_lock); pthread_cond_wait(&thread_info->block_ready, &thread_info->block_ready_lock); @@ -2051,7 +2057,10 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) if (block_type == 2) { read = pUncomp; outptr = (uint8_t *) malloc(original_size); + + + start = clock(); for (int j = 0; j < (Py_ssize_t)cols; j++) { write = outptr + j; @@ -2061,6 +2070,19 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) write += cols; } } + end = clock(); + t1 += end - start; + + start = clock(); + read=pUncomp; + for (int i = 0; i < lines ; i++) + { + for (int j=0; j= thread_count) { #if defined(_WIN32) + start = clock(); WaitForSingleObject(bytes_ready[position], INFINITE); + end = clock(); + tt += end - start; ResetEvent(bytes_ready[position]); #else pthread_mutex_lock(&bytes_ready_locks[position]); @@ -2395,6 +2423,8 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) if (position == thread_count) position = 0; } + + printf("TT=%lf\n", tt); for (int i=0; i Date: Thu, 26 Dec 2024 12:07:45 +0200 Subject: [PATCH 37/60] before optim --- src/asammdf/blocks/cutils.c | 132 +++++++++++++++++++++++------------- 1 file changed, 84 insertions(+), 48 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index bd40b309b..267fe1e8a 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1954,6 +1954,18 @@ static PyObject *bytes_dtype_size(PyObject *self, PyObject *args) } +void transpose(uint8_t * restrict dst, uint8_t * restrict src, uint64_t p, uint64_t n, size_t block){ + for (size_t i = 0; i < n; i += block) { + for(size_t j = 0; j < p; ++j) { + for(size_t b = 0; b < block && i + b < n; ++b) { + dst[j*n + i + b] = src[(i + b)*p + j]; + } + } + } +} + + + typedef struct InfoBlock { int64_t address; int64_t original_size; @@ -2013,7 +2025,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) int result; clock_t 
start, end; - double t1=0, t2=0, t3=0; + double t1=0, t2=0, t3=0, t4=0, t5=0, t6=0, t7=0; uint8_t *outptr, *inptr, *write; uint8_t *pUncomp, *read; @@ -2059,20 +2071,16 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) outptr = (uint8_t *) malloc(original_size); - - start = clock(); - for (int j = 0; j < (Py_ssize_t)cols; j++) - { - write = outptr + j; - for (int i = 0; i < (Py_ssize_t)lines; i++) - { - *write = *read++; - write += cols; - } - } +start = clock(); + transpose(outptr, pUncomp, lines, cols, 32); end = clock(); - t1 += end - start; + t5 += end - start; + start = clock(); + transpose(outptr, pUncomp, lines, cols, 64); + end = clock(); + t6 += end - start; + start = clock(); read=pUncomp; for (int i = 0; i < lines ; i++) @@ -2083,6 +2091,29 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) end = clock(); t2 += end - start; + /* start = clock(); + transpose(outptr, pUncomp, lines, cols, 8); + end = clock(); + t3 += end - start; + + start = clock(); + transpose(outptr, pUncomp, lines, cols, 16); + end = clock(); + t4 += end - start;*/ + + + + start = clock(); + transpose(outptr, pUncomp, lines, cols, 11); + end = clock(); + t1 += end - start; + + /* start = clock(); + transpose(outptr, pUncomp, lines, cols, 47198); + end = clock(); + t7 += end - start;*/ + + free(pUncomp); pUncomp = outptr; } @@ -2126,7 +2157,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) #endif } - printf("t1=%lf t2=%lf t3=%lf\n", t1, t2, t3); + printf("t1=%lf t2=%lf t3=%lf t4=%lf t5=%lf t6=%lf t7=%lf\n", t1, t2, t3, t4, t5, t6, t7); return 0; } @@ -2142,7 +2173,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) cycles, step = 0, invalidation_bytes; Py_ssize_t isize = 0, offset = 0; int is_list; - int64_t byte_offset, byte_count, new_cycles; + int64_t byte_offset, byte_count, new_cycles, max_uncompressed, max_compressed; int32_t invalidation_bit_position; PtrInfoBlock block_info; @@ -2260,39 +2291,6 @@ static PyObject 
*get_channel_raw_bytes_complete(PyObject *self, PyObject *args) block_info = (PtrInfoBlock) malloc(sizeof(InfoBlock) * info_count); thread_info = (PtrProcessesingBlock) malloc(sizeof(ProcessesingBlock) * thread_count); - for (int i=0; i Date: Thu, 26 Dec 2024 18:02:16 +0200 Subject: [PATCH 38/60] optimized ?:? --- src/asammdf/blocks/cutils.c | 209 ++++++++++++++++++----------------- src/asammdf/blocks/mdf_v4.py | 6 +- 2 files changed, 113 insertions(+), 102 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 267fe1e8a..14ba56b26 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1954,14 +1954,14 @@ static PyObject *bytes_dtype_size(PyObject *self, PyObject *args) } -void transpose(uint8_t * restrict dst, uint8_t * restrict src, uint64_t p, uint64_t n, size_t block){ - for (size_t i = 0; i < n; i += block) { - for(size_t j = 0; j < p; ++j) { - for(size_t b = 0; b < block && i + b < n; ++b) { - dst[j*n + i + b] = src[(i + b)*p + j]; - } - } +void transpose(uint8_t * restrict dst, uint8_t * restrict src, uint64_t p, uint64_t n, size_t block) { + for (size_t i = 0; i < n; i += block) { + for(size_t j = 0; j < p; ++j) { + for(size_t b = 0; b < block && i + b < n; ++b) { + dst[j*n + i + b] = src[(i + b)*p + j]; + } } + } } @@ -2014,7 +2014,7 @@ typedef struct ProcessesingBlock { void * get_channel_raw_bytes_complete_C(void *lpParam ) { Py_ssize_t count, byte_count, byte_offset, delta, thread_count, param, block_type; - int64_t original_size, compressed_size, block_limit, cycles; + int64_t original_size, compressed_size, block_limit, cycles, current_uncompressed_size=0, current_out_size=0, max_cycles=0; PtrProcessesingBlock thread_info; thread_info = (PtrProcessesingBlock) lpParam; PtrInfoBlock block_info; @@ -2028,24 +2028,31 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) double t1=0, t2=0, t3=0, t4=0, t5=0, t6=0, t7=0; uint8_t *outptr, *inptr, *write; - uint8_t *pUncomp, *read; + uint8_t 
*pUncomp=NULL, *pUncompTr=NULL, *read, *data_ptr; + + inptr = thread_info->inptr; + + for (int i =0; isignal_count; i++) { + thread_info->outptr[i] = NULL; + } while (1) { #if defined(_WIN32) - start = clock(); + start = clock(); WaitForSingleObject(thread_info->block_ready, INFINITE); ResetEvent(thread_info->block_ready); end = clock(); t3 += end - start; if (thread_info->stop) break; - + + //printf("Thr %d processing\n", thread_info->idx); + #else pthread_mutex_lock(&thread_info->block_ready_lock); pthread_cond_wait(&thread_info->block_ready, &thread_info->block_ready_lock); pthread_mutex_unlock(&thread_info->block_ready_lock); #endif - inptr = thread_info->inptr; original_size = thread_info->block_info->original_size; compressed_size = thread_info->block_info->compressed_size; param = thread_info->block_info->param; @@ -2054,88 +2061,83 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) cols = param; lines = original_size / cols; - // decompress count = original_size / record_size; - pUncomp = (uint8_t *) malloc(original_size); - struct libdeflate_decompressor *decompressor = libdeflate_alloc_decompressor(); - libdeflate_zlib_decompress(decompressor, - inptr, compressed_size, - pUncomp, original_size, - NULL); - libdeflate_free_decompressor(decompressor); - - // reverse transposition - if (block_type == 2) { - read = pUncomp; - outptr = (uint8_t *) malloc(original_size); - - -start = clock(); - transpose(outptr, pUncomp, lines, cols, 32); - end = clock(); - t5 += end - start; - - start = clock(); - transpose(outptr, pUncomp, lines, cols, 64); - end = clock(); - t6 += end - start; - - start = clock(); - read=pUncomp; - for (int i = 0; i < lines ; i++) - { - for (int j=0; jblock_info->block_limit >= 0) { cycles = thread_info->block_info->block_limit / record_size ; thread_info->cycles = cycles; } else { cycles = count; - thread_info->cycles = count; + thread_info->cycles = cycles; + } + + if (block_type == 0) { + data_ptr = inptr; + + } + else { + + // 
decompress + if (original_size > current_uncompressed_size) { + //printf("\tThr %d new ptr\n", thread_info->idx); + if (pUncomp) free(pUncomp); + pUncomp = (uint8_t *) malloc(original_size); + //if (!pUncomp) printf("\tThr %d pUncomp error\n", thread_info->idx); + current_uncompressed_size=original_size; + } + //printf("\tThr %d start decompress %p %d\n", thread_info->idx, inptr, compressed_size); + struct libdeflate_decompressor *decompressor = libdeflate_alloc_decompressor(); + libdeflate_zlib_decompress(decompressor, + inptr, compressed_size, + pUncomp, original_size, + NULL); + libdeflate_free_decompressor(decompressor); + + //printf("\tThr %d decmpressed\n", thread_info->idx); + + // reverse transposition + if (block_type == 2) { + if (current_out_size < original_size) { + if (pUncompTr) free(pUncompTr); + pUncompTr = (uint8_t *) malloc(original_size); + //if (!pUncompTr) printf("\tThr %d pUncompTr error\n", thread_info->idx); + current_out_size = original_size; + } + + start = clock(); + for (int j=0; jidx); + + } + else { + data_ptr = pUncomp; + } } + //printf("\tThr %d %d %d\n", thread_info->idx, cycles, max_cycles); + for (int i =0; isignal_count; i++) { byte_offset = thread_info->signals[i].byte_offset; byte_count = thread_info->signals[i].byte_count; - outptr = (uint8_t *) malloc(cycles * byte_count); - - read = pUncomp + byte_offset; - write = outptr; + if (max_cycles < cycles) { + if (thread_info->outptr[i]) { + free(thread_info->outptr[i]); + } + thread_info->outptr[i] = (uint8_t *) malloc(cycles * byte_count); + if (!thread_info->outptr[i]) printf("Thr %d thread_info->outptr[%d] error\n", thread_info->idx,i); + } + + read = data_ptr + byte_offset; + write = thread_info->outptr[i]; for (Py_ssize_t j = 0; j < cycles; j++) { @@ -2143,10 +2145,12 @@ start = clock(); write += byte_count; read += record_size; } - thread_info->outptr[i] = outptr; + } + + if (max_cycles < cycles) max_cycles = cycles; - free(pUncomp); + //printf("\tThr %d set event\n", 
thread_info->idx); #if defined(_WIN32) SetEvent(thread_info->bytes_ready); @@ -2157,6 +2161,11 @@ start = clock(); #endif } + for (int i =0; isignal_count; i++) { + if (thread_info->outptr[i]) free(thread_info->outptr[i]); + } + if (pUncomp) free(pUncomp); + if (pUncompTr) free(pUncompTr); printf("t1=%lf t2=%lf t3=%lf t4=%lf t5=%lf t6=%lf t7=%lf\n", t1, t2, t3, t4, t5, t6, t7); return 0; } @@ -2173,7 +2182,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) cycles, step = 0, invalidation_bytes; Py_ssize_t isize = 0, offset = 0; int is_list; - int64_t byte_offset, byte_count, new_cycles, max_uncompressed, max_compressed; + int64_t byte_offset, byte_count, new_cycles, max_uncompressed=0, max_compressed=0; int32_t invalidation_bit_position; PtrInfoBlock block_info; @@ -2339,7 +2348,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) ref = PyObject_GetAttrString( item, "block_limit"); - + if (ref == Py_None) { block_info[i].block_limit = -1; } @@ -2347,12 +2356,14 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) block_info[i].block_limit = PyLong_AsLongLong(ref); } Py_XDECREF(ref); - - //max_compressed = MAX(max_compressed, block_info[i].compressed_size); + + max_compressed = MAX(max_compressed, block_info[i].compressed_size); //max_uncompressed = max_uncompressed, block_info[i].original_size); } - + + //printf("NAX compressed=%ld\n", max_compressed); + for (int i=0; i= thread_count) { #if defined(_WIN32) - start = clock(); + start = clock(); WaitForSingleObject(bytes_ready[position], INFINITE); end = clock(); tt += end - start; @@ -2432,20 +2444,17 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) pthread_mutex_unlock(&bytes_ready_locks[position]); #endif new_cycles = thread->cycles; - thread->cycles = 0; for (int j=0; joutptr[j], signal_info[j].byte_count * new_cycles); signal_info[j].data_position += signal_info[j].byte_count * 
new_cycles; - free(thread->outptr[j]); } - free(thread->inptr); } thread->block_info = &block_info[i]; - buffer = (uint8_t *) malloc(block_info[i].compressed_size); FSEEK64(fptr, block_info[i].address, 0); - result = fread(buffer, 1, block_info[i].compressed_size, fptr); - thread->inptr = buffer; + //printf("size=%ld %p\n", block_info[i].compressed_size, thread->inptr); + result = fread(thread->inptr, 1, block_info[i].compressed_size, fptr); + //printf("read result=%lld\n", result); #if defined(_WIN32) SetEvent(block_ready[position]); @@ -2459,7 +2468,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) if (position == thread_count) position = 0; } - + printf("TT=%lf\n", tt); for (int i=0; ioutptr[j], signal_info[j].byte_count * new_cycles); signal_info[j].data_position += signal_info[j].byte_count * new_cycles; - free(thread->outptr[j]); } thread->stop = 1; @@ -2508,7 +2516,10 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) } #endif - for (int i=0; i Iterator[DataBlockInfo]: mapped = mapped or not is_file_like(stream) - if record_size > 32 * 1024 * 1024: + if record_size > 4 * 1024 * 1024: READ_CHUNK_SIZE = record_size elif record_size: - READ_CHUNK_SIZE = 32 * 1024 * 1024 // record_size * record_size + READ_CHUNK_SIZE = 4 * 1024 * 1024 // record_size * record_size else: - READ_CHUNK_SIZE = 32 * 1024 * 1024 + READ_CHUNK_SIZE = 4 * 1024 * 1024 READ_CHUNK_SIZE = min(READ_CHUNK_SIZE, total_size) From 3e74974ad9277f176108ba342068f7f9d7d9a311 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Thu, 26 Dec 2024 18:54:03 +0200 Subject: [PATCH 39/60] winapi mmap --- src/asammdf/blocks/cutils.c | 86 +++++++++++++++++++++++++++++++------ 1 file changed, 72 insertions(+), 14 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 14ba56b26..1ef170b9f 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -2080,7 +2080,7 @@ void * 
get_channel_raw_bytes_complete_C(void *lpParam ) // decompress if (original_size > current_uncompressed_size) { - //printf("\tThr %d new ptr\n", thread_info->idx); + printf("\tThr %d new ptr\n", thread_info->idx); if (pUncomp) free(pUncomp); pUncomp = (uint8_t *) malloc(original_size); //if (!pUncomp) printf("\tThr %d pUncomp error\n", thread_info->idx); @@ -2099,6 +2099,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) // reverse transposition if (block_type == 2) { if (current_out_size < original_size) { + printf("\tThr %d new trtrtrptr\n", thread_info->idx); if (pUncompTr) free(pUncompTr); pUncompTr = (uint8_t *) malloc(original_size); //if (!pUncompTr) printf("\tThr %d pUncompTr error\n", thread_info->idx); @@ -2132,6 +2133,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) if (thread_info->outptr[i]) { free(thread_info->outptr[i]); } + printf("\tThr %d sig i=%d malloc\n", thread_info->idx); thread_info->outptr[i] = (uint8_t *) malloc(cycles * byte_count); if (!thread_info->outptr[i]) printf("Thr %d thread_info->outptr[%d] error\n", thread_info->idx,i); } @@ -2148,8 +2150,10 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) } - if (max_cycles < cycles) max_cycles = cycles; - + if (max_cycles < cycles) { + printf("\tThr %d malloc cyc=%d max_cycles=%d\n", thread_info->idx, cycles, max_cycles); + max_cycles = cycles; +} //printf("\tThr %d set event\n", thread_info->idx); #if defined(_WIN32) @@ -2204,7 +2208,62 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) } else { - fptr = fopen(file_name,"rb"); + //fptr = fopen(file_name,"rb"); + TCHAR *lpFileName = TEXT(file_name); + HANDLE hFile; + HANDLE hMap; + LPVOID lpBasePtr; + LARGE_INTEGER liFileSize; + + hFile = CreateFile(file_name, + GENERIC_READ, // dwDesiredAccess + 0, // dwShareMode + NULL, // lpSecurityAttributes + OPEN_EXISTING, // dwCreationDisposition + FILE_ATTRIBUTE_NORMAL, // dwFlagsAndAttributes + 0); // hTemplateFile + if (hFile == 
INVALID_HANDLE_VALUE) { + fprintf(stderr, "CreateFile failed with error %d\n", GetLastError()); + return 1; + } + + if (!GetFileSizeEx(hFile, &liFileSize)) { + fprintf(stderr, "GetFileSize failed with error %d\n", GetLastError()); + CloseHandle(hFile); + return 1; + } + + if (liFileSize.QuadPart == 0) { + fprintf(stderr, "File is empty\n"); + CloseHandle(hFile); + return 1; + } + + hMap = CreateFileMapping( + hFile, + NULL, // Mapping attributes + PAGE_READONLY, // Protection flags + 0, // MaximumSizeHigh + 0, // MaximumSizeLow + NULL); // Name + if (hMap == 0) { + fprintf(stderr, "CreateFileMapping failed with error %d\n", GetLastError()); + CloseHandle(hFile); + return 1; + } + + lpBasePtr = MapViewOfFile( + hMap, + FILE_MAP_READ, // dwDesiredAccess + 0, // dwFileOffsetHigh + 0, // dwFileOffsetLow + 0); // dwNumberOfBytesToMap + if (lpBasePtr == NULL) { + fprintf(stderr, "MapViewOfFile failed with error %d\n", GetLastError()); + CloseHandle(hMap); + CloseHandle(hFile); + return 1; + } #if defined(_WIN32) HANDLE *hThreads, *block_ready, *bytes_ready; @@ -2399,11 +2458,6 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) thread_info[i].cycles = 0; } - out = PyByteArray_FromStringAndSize(NULL, cycles * byte_count); - if (!out) - return NULL; - outptr = PyByteArray_AsString(out); - printf("%d threads %d blocks %d cycles %d size\n", thread_count, info_count, cycles, cycles * byte_count); #if defined(_WIN32) @@ -2451,10 +2505,10 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) } thread->block_info = &block_info[i]; - FSEEK64(fptr, block_info[i].address, 0); - //printf("size=%ld %p\n", block_info[i].compressed_size, thread->inptr); - result = fread(thread->inptr, 1, block_info[i].compressed_size, fptr); - //printf("read result=%lld\n", result); + memcpy(thread->inptr, ((uint8_t*)lpBasePtr) + block_info[i].address, block_info[i].compressed_size); + + //FSEEK64(fptr, block_info[i].address, 0); + //result = 
fread(thread->inptr, 1, block_info[i].compressed_size, fptr); #if defined(_WIN32) SetEvent(block_ready[position]); @@ -2502,6 +2556,10 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) position++; if (position == thread_count) position = 0; } + + UnmapViewOfFile(lpBasePtr); + CloseHandle(hMap); + CloseHandle(hFile); #if defined(_WIN32) WaitForMultipleObjects(thread_count, hThreads, true, INFINITE); @@ -2525,7 +2583,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) free(thread_info); } - fclose(fptr); + //fclose(fptr); printf("tuples\n"); From 9e92437ed96dbfade1369a64b2beb8e221889971 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 27 Dec 2024 09:59:07 +0200 Subject: [PATCH 40/60] mmap --- src/asammdf/blocks/cutils.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 1ef170b9f..85a4aff6d 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -2215,12 +2215,12 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) LPVOID lpBasePtr; LARGE_INTEGER liFileSize; - hFile = CreateFile(file_name, + hFile = CreateFile(lpFileName, GENERIC_READ, // dwDesiredAccess - 0, // dwShareMode + FILE_SHARE_READ, // dwShareMode NULL, // lpSecurityAttributes OPEN_EXISTING, // dwCreationDisposition - FILE_ATTRIBUTE_NORMAL, // dwFlagsAndAttributes + FILE_FLAG_RANDOM_ACCESS, // dwFlagsAndAttributes 0); // hTemplateFile if (hFile == INVALID_HANDLE_VALUE) { fprintf(stderr, "CreateFile failed with error %d\n", GetLastError()); From 5d76e4d89d05887b24869354bc7622ce2c4de467 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 27 Dec 2024 15:30:10 +0200 Subject: [PATCH 41/60] style --- src/asammdf/blocks/cutils.c | 123 ++++++++++++++++++------------------ 1 file changed, 63 insertions(+), 60 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 
85a4aff6d..3a83b22cb 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -2031,9 +2031,9 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) uint8_t *pUncomp=NULL, *pUncompTr=NULL, *read, *data_ptr; inptr = thread_info->inptr; - + for (int i =0; isignal_count; i++) { - thread_info->outptr[i] = NULL; + thread_info->outptr[i] = NULL; } while (1) { @@ -2099,7 +2099,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) // reverse transposition if (block_type == 2) { if (current_out_size < original_size) { - printf("\tThr %d new trtrtrptr\n", thread_info->idx); + printf("\tThr %d new trtrtrptr\n", thread_info->idx); if (pUncompTr) free(pUncompTr); pUncompTr = (uint8_t *) malloc(original_size); //if (!pUncompTr) printf("\tThr %d pUncompTr error\n", thread_info->idx); @@ -2108,8 +2108,8 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) start = clock(); for (int j=0; jidx, cycles, max_cycles); + if (max_cycles < cycles) { + printf("\tThr %d malloc cyc=%d max_cycles=%d\n", thread_info->idx, cycles, max_cycles); + + for (int i =0; isignal_count; i++) { + if (max_cycles < cycles) { + if (thread_info->outptr[i]) { + free(thread_info->outptr[i]); + } + printf("\tThr %d sig i=%d malloc\n", thread_info->idx, i); + thread_info->outptr[i] = (uint8_t *) malloc(cycles * thread_info->signals[i].byte_count); + if (!thread_info->outptr[i]) printf("Thr %d thread_info->outptr[%d] error\n", thread_info->idx,i); + } + } + max_cycles = cycles; + } + for (int i =0; isignal_count; i++) { byte_offset = thread_info->signals[i].byte_offset; byte_count = thread_info->signals[i].byte_count; - if (max_cycles < cycles) { - if (thread_info->outptr[i]) { - free(thread_info->outptr[i]); - } - printf("\tThr %d sig i=%d malloc\n", thread_info->idx); - thread_info->outptr[i] = (uint8_t *) malloc(cycles * byte_count); - if (!thread_info->outptr[i]) printf("Thr %d thread_info->outptr[%d] error\n", thread_info->idx,i); - } - read = data_ptr + byte_offset; write 
= thread_info->outptr[i]; @@ -2149,11 +2156,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) } } - - if (max_cycles < cycles) { - printf("\tThr %d malloc cyc=%d max_cycles=%d\n", thread_info->idx, cycles, max_cycles); - max_cycles = cycles; -} + //printf("\tThr %d set event\n", thread_info->idx); #if defined(_WIN32) @@ -2166,8 +2169,8 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) } for (int i =0; isignal_count; i++) { - if (thread_info->outptr[i]) free(thread_info->outptr[i]); - } + if (thread_info->outptr[i]) free(thread_info->outptr[i]); + } if (pUncomp) free(pUncomp); if (pUncompTr) free(pUncompTr); printf("t1=%lf t2=%lf t3=%lf t4=%lf t5=%lf t6=%lf t7=%lf\n", t1, t2, t3, t4, t5, t6, t7); @@ -2215,54 +2218,54 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) LPVOID lpBasePtr; LARGE_INTEGER liFileSize; - hFile = CreateFile(lpFileName, - GENERIC_READ, // dwDesiredAccess - FILE_SHARE_READ, // dwShareMode - NULL, // lpSecurityAttributes - OPEN_EXISTING, // dwCreationDisposition - FILE_FLAG_RANDOM_ACCESS, // dwFlagsAndAttributes - 0); // hTemplateFile + hFile = CreateFile(lpFileName, + GENERIC_READ, // dwDesiredAccess + FILE_SHARE_READ, // dwShareMode + NULL, // lpSecurityAttributes + OPEN_EXISTING, // dwCreationDisposition + FILE_FLAG_RANDOM_ACCESS | FILE_FLAG_NO_BUFFERING, // dwFlagsAndAttributes + 0); // hTemplateFile if (hFile == INVALID_HANDLE_VALUE) { - fprintf(stderr, "CreateFile failed with error %d\n", GetLastError()); - return 1; + fprintf(stderr, "CreateFile failed with error %d\n", GetLastError()); + return 1; } if (!GetFileSizeEx(hFile, &liFileSize)) { - fprintf(stderr, "GetFileSize failed with error %d\n", GetLastError()); - CloseHandle(hFile); - return 1; + fprintf(stderr, "GetFileSize failed with error %d\n", GetLastError()); + CloseHandle(hFile); + return 1; } if (liFileSize.QuadPart == 0) { - fprintf(stderr, "File is empty\n"); - CloseHandle(hFile); - return 1; + fprintf(stderr, "File is 
empty\n"); + CloseHandle(hFile); + return 1; } hMap = CreateFileMapping( - hFile, - NULL, // Mapping attributes - PAGE_READONLY, // Protection flags - 0, // MaximumSizeHigh - 0, // MaximumSizeLow - NULL); // Name + hFile, + NULL, // Mapping attributes + PAGE_READONLY, // Protection flags + 0, // MaximumSizeHigh + 0, // MaximumSizeLow + NULL); // Name if (hMap == 0) { - fprintf(stderr, "CreateFileMapping failed with error %d\n", GetLastError()); - CloseHandle(hFile); - return 1; + fprintf(stderr, "CreateFileMapping failed with error %d\n", GetLastError()); + CloseHandle(hFile); + return 1; } lpBasePtr = MapViewOfFile( - hMap, - FILE_MAP_READ, // dwDesiredAccess - 0, // dwFileOffsetHigh - 0, // dwFileOffsetLow - 0); // dwNumberOfBytesToMap + hMap, + FILE_MAP_READ, // dwDesiredAccess + 0, // dwFileOffsetHigh + 0, // dwFileOffsetLow + 0); // dwNumberOfBytesToMap if (lpBasePtr == NULL) { - fprintf(stderr, "MapViewOfFile failed with error %d\n", GetLastError()); - CloseHandle(hMap); - CloseHandle(hFile); - return 1; + fprintf(stderr, "MapViewOfFile failed with error %d\n", GetLastError()); + CloseHandle(hMap); + CloseHandle(hFile); + return 1; } #if defined(_WIN32) @@ -2483,7 +2486,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) for (int i=0; i= thread_count) { #if defined(_WIN32) @@ -2506,7 +2509,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) thread->block_info = &block_info[i]; memcpy(thread->inptr, ((uint8_t*)lpBasePtr) + block_info[i].address, block_info[i].compressed_size); - + //FSEEK64(fptr, block_info[i].address, 0); //result = fread(thread->inptr, 1, block_info[i].compressed_size, fptr); @@ -2556,10 +2559,10 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) position++; if (position == thread_count) position = 0; } - - UnmapViewOfFile(lpBasePtr); - CloseHandle(hMap); - CloseHandle(hFile); + + UnmapViewOfFile(lpBasePtr); + CloseHandle(hMap); + 
CloseHandle(hFile); #if defined(_WIN32) WaitForMultipleObjects(thread_count, hThreads, true, INFINITE); From 70b80cdbd148959ce11450f96dab775c65899e73 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 3 Jan 2025 10:59:01 +0200 Subject: [PATCH 42/60] single access --- src/asammdf/blocks/cutils.c | 76 +++++++++++-------------------------- 1 file changed, 22 insertions(+), 54 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 3a83b22cb..89bf6f97e 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1971,6 +1971,7 @@ typedef struct InfoBlock { int64_t original_size; int64_t compressed_size; int64_t block_limit; + int64_t record_offset; Py_ssize_t param; Py_ssize_t block_type; Py_ssize_t idx; @@ -1984,16 +1985,13 @@ typedef struct SignalInfo { int64_t byte_count; int32_t invalidation_bit_position; uint8_t *data; - uint8_t *data_position; PyObject *obj; } SignalInfo, *PtrSignalInfo; typedef struct ProcessesingBlock { uint8_t stop; - uint8_t ** outptr; uint8_t * inptr; - int64_t cycles; PtrInfoBlock block_info; struct SignalInfo *signals; Py_ssize_t signal_count; @@ -2014,7 +2012,7 @@ typedef struct ProcessesingBlock { void * get_channel_raw_bytes_complete_C(void *lpParam ) { Py_ssize_t count, byte_count, byte_offset, delta, thread_count, param, block_type; - int64_t original_size, compressed_size, block_limit, cycles, current_uncompressed_size=0, current_out_size=0, max_cycles=0; + int64_t original_size, compressed_size, record_offset, block_limit, cycles, current_uncompressed_size=0, current_out_size=0, max_cycles=0; PtrProcessesingBlock thread_info; thread_info = (PtrProcessesingBlock) lpParam; PtrInfoBlock block_info; @@ -2032,10 +2030,6 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) inptr = thread_info->inptr; - for (int i =0; isignal_count; i++) { - thread_info->outptr[i] = NULL; - } - while (1) { #if defined(_WIN32) start = clock(); @@ -2057,6 +2051,7 @@ void * 
get_channel_raw_bytes_complete_C(void *lpParam ) compressed_size = thread_info->block_info->compressed_size; param = thread_info->block_info->param; block_type = thread_info->block_info->block_type; + record_offset = thread_info->block_info->record_offset; cols = param; lines = original_size / cols; @@ -2065,11 +2060,9 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) if (thread_info->block_info->block_limit >= 0) { cycles = thread_info->block_info->block_limit / record_size ; - thread_info->cycles = cycles; } else { cycles = count; - thread_info->cycles = cycles; } if (block_type == 0) { @@ -2105,13 +2098,21 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) //if (!pUncompTr) printf("\tThr %d pUncompTr error\n", thread_info->idx); current_out_size = original_size; } - + start = clock(); - for (int j=0; jidx, cycles, max_cycles); - if (max_cycles < cycles) { - printf("\tThr %d malloc cyc=%d max_cycles=%d\n", thread_info->idx, cycles, max_cycles); - - for (int i =0; isignal_count; i++) { - if (max_cycles < cycles) { - if (thread_info->outptr[i]) { - free(thread_info->outptr[i]); - } - printf("\tThr %d sig i=%d malloc\n", thread_info->idx, i); - thread_info->outptr[i] = (uint8_t *) malloc(cycles * thread_info->signals[i].byte_count); - if (!thread_info->outptr[i]) printf("Thr %d thread_info->outptr[%d] error\n", thread_info->idx,i); - } - } - max_cycles = cycles; - } - for (int i =0; isignal_count; i++) { byte_offset = thread_info->signals[i].byte_offset; byte_count = thread_info->signals[i].byte_count; read = data_ptr + byte_offset; - write = thread_info->outptr[i]; + write = thread_info->signals[i].data + record_offset * byte_count; for (Py_ssize_t j = 0; j < cycles; j++) { @@ -2168,9 +2153,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) #endif } - for (int i =0; isignal_count; i++) { - if (thread_info->outptr[i]) free(thread_info->outptr[i]); - } + if (pUncomp) free(pUncomp); if (pUncompTr) free(pUncompTr); printf("t1=%lf t2=%lf 
t3=%lf t4=%lf t5=%lf t6=%lf t7=%lf\n", t1, t2, t3, t4, t5, t6, t7); @@ -2189,7 +2172,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) cycles, step = 0, invalidation_bytes; Py_ssize_t isize = 0, offset = 0; int is_list; - int64_t byte_offset, byte_count, new_cycles, max_uncompressed=0, max_compressed=0; + int64_t byte_offset, byte_count, new_cycles, max_uncompressed=0, max_compressed=0, record_offset=0; int32_t invalidation_bit_position; PtrInfoBlock block_info; @@ -2223,7 +2206,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) FILE_SHARE_READ, // dwShareMode NULL, // lpSecurityAttributes OPEN_EXISTING, // dwCreationDisposition - FILE_FLAG_RANDOM_ACCESS | FILE_FLAG_NO_BUFFERING, // dwFlagsAndAttributes + FILE_FLAG_RANDOM_ACCESS, // dwFlagsAndAttributes 0); // hTemplateFile if (hFile == INVALID_HANDLE_VALUE) { fprintf(stderr, "CreateFile failed with error %d\n", GetLastError()); @@ -2330,7 +2313,6 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) signal_info[i].byte_count = byte_count; signal_info[i].invalidation_bit_position = invalidation_bit_position; signal_info[i].data = (uint8_t *) PyByteArray_AsString(obj); - signal_info[i].data_position = signal_info[i].data; signal_info[i].obj = obj; } @@ -2341,7 +2323,6 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) signal_info[signal_count].byte_count = invalidation_bytes; signal_info[signal_count].invalidation_bit_position = -1; signal_info[signal_count].data = (uint8_t *) PyByteArray_AsString(obj); - signal_info[signal_count].data_position = signal_info[signal_count].data; signal_info[signal_count].obj = obj; } @@ -2457,8 +2438,6 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) thread_info[i].idx = i; thread_info[i].block_ready = block_ready[i]; thread_info[i].bytes_ready = bytes_ready[i]; - thread_info[i].outptr = (uint8_t **) malloc(sizeof(uint8_t 
*) * signal_and_invalidation_count); - thread_info[i].cycles = 0; } printf("%d threads %d blocks %d cycles %d size\n", thread_count, info_count, cycles, cycles * byte_count); @@ -2500,14 +2479,11 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) pthread_cond_wait(&bytes_ready[position], &bytes_ready_locks[position]); pthread_mutex_unlock(&bytes_ready_locks[position]); #endif - new_cycles = thread->cycles; - for (int j=0; joutptr[j], signal_info[j].byte_count * new_cycles); - signal_info[j].data_position += signal_info[j].byte_count * new_cycles; - } } thread->block_info = &block_info[i]; + thread->block_info->record_offset = record_offset; + record_offset += block_info[i].original_size / record_size; memcpy(thread->inptr, ((uint8_t*)lpBasePtr) + block_info[i].address, block_info[i].compressed_size); //FSEEK64(fptr, block_info[i].address, 0); @@ -2539,13 +2515,6 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) pthread_cond_wait(&bytes_ready[position], &bytes_ready_locks[position]); pthread_mutex_unlock(&bytes_ready_locks[position]); #endif - new_cycles = thread->cycles; - thread->cycles = 0; - for (int j=0; joutptr[j], signal_info[j].byte_count * new_cycles); - signal_info[j].data_position += signal_info[j].byte_count * new_cycles; - } - thread->stop = 1; #if defined(_WIN32) @@ -2579,7 +2548,6 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) for (int i=0; i Date: Fri, 3 Jan 2025 17:51:57 +0200 Subject: [PATCH 43/60] barbaric adaptation for the select function --- CMakeLists.txt | 3 + src/asammdf/__init__.py | 3 +- src/asammdf/blocks/cutils.c | 131 ++++++++++++--- src/asammdf/blocks/mdf_v4.py | 11 +- src/asammdf/blocks/utils.py | 6 + src/asammdf/mdf.py | 302 +++++++++++++++++++++++++++++++++++ 6 files changed, 429 insertions(+), 27 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 26981b132..2d0e31992 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ 
-12,6 +12,9 @@ find_package( ) set(CMAKE_POSITION_INDEPENDENT_CODE ON) +set(LIBDEFLATE_COMPRESSION_SUPPORT OFF) +set(LIBDEFLATE_GZIP_SUPPORT OFF) +set(LIBDEFLATE_BUILD_GZIP OFF) # Add submodule libdeflate add_subdirectory(ext/libdeflate EXCLUDE_FROM_ALL) diff --git a/src/asammdf/__init__.py b/src/asammdf/__init__.py index 307bcb2a4..9f627270f 100644 --- a/src/asammdf/__init__.py +++ b/src/asammdf/__init__.py @@ -15,7 +15,7 @@ from .blocks.utils import load_channel_names_from_file from .gui import plot from .mdf import MDF, SUPPORTED_VERSIONS -from .signal import Signal +from .signal import InvalidationArray, Signal from .version import __version__ try: @@ -28,6 +28,7 @@ __all__ = [ "MDF", "SUPPORTED_VERSIONS", + "InvalidationArray", "Signal", "Source", "__cextension__", diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 89bf6f97e..a553c619f 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -1376,12 +1376,13 @@ static PyObject *get_invalidation_bits_array(PyObject *self, PyObject *args) static PyObject *get_invalidation_bits_array_C(uint8_t * data, int64_t cycles, int64_t invalidation_pos, int64_t invalidation_size) { - if (invalidation_pos<0) { + if (invalidation_pos < 0) { return Py_None; } + else { - PyObject *out; + PyObject *out=NULL; uint8_t mask, *inptr, *outptr; mask = (uint8_t ) (1 << (invalidation_pos % 8)); @@ -1390,6 +1391,7 @@ static PyObject *get_invalidation_bits_array_C(uint8_t * data, int64_t cycles, i npy_intp dims[1]; dims[0] = cycles; out = (PyArrayObject *)PyArray_EMPTY(1, dims, NPY_BOOL, 0); + if (!out) return NULL; outptr = (uint8_t *)PyArray_GETPTR1(out, 0); for (int i=0; i current_uncompressed_size) { - printf("\tThr %d new ptr\n", thread_info->idx); + //printf("\tThr %d new ptr\n", thread_info->idx); if (pUncomp) free(pUncomp); pUncomp = (uint8_t *) malloc(original_size); //if (!pUncomp) printf("\tThr %d pUncomp error\n", thread_info->idx); @@ -2092,13 +2105,13 @@ void * 
get_channel_raw_bytes_complete_C(void *lpParam ) // reverse transposition if (block_type == 2) { if (current_out_size < original_size) { - printf("\tThr %d new trtrtrptr\n", thread_info->idx); + //printf("\tThr %d new trtrtrptr\n", thread_info->idx); if (pUncompTr) free(pUncompTr); pUncompTr = (uint8_t *) malloc(original_size); //if (!pUncompTr) printf("\tThr %d pUncompTr error\n", thread_info->idx); current_out_size = original_size; } - + start = clock(); read = pUncomp; for (int j = 0; j < (Py_ssize_t)cols; j++) @@ -2112,7 +2125,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) } end = clock(); t7 += end - start; - + data_ptr = pUncompTr; @@ -2156,7 +2169,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) if (pUncomp) free(pUncomp); if (pUncompTr) free(pUncompTr); - printf("t1=%lf t2=%lf t3=%lf t4=%lf t5=%lf t6=%lf t7=%lf\n", t1, t2, t3, t4, t5, t6, t7); + //printf("t1=%lf t2=%lf t3=%lf t4=%lf t5=%lf t6=%lf t7=%lf\n", t1, t2, t3, t4, t5, t6, t7); return 0; } @@ -2164,7 +2177,7 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) { Py_ssize_t info_count, signal_count, signal_and_invalidation_count, thread_count=11; - PyObject *data_blocks_info, *signals, *out = NULL, *item, *ref, *obj; + PyObject *data_blocks_info, *signals, *out = NULL, *item, *ref, *obj, *group_index, *InvalidationArray; char *outptr, *file_name; char *read_pos = NULL, *write_pos = NULL; @@ -2186,14 +2199,18 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) clock_t start, end; double tt=0; - if (!PyArg_ParseTuple(args, "OOsnnn|n", - &data_blocks_info, &signals, &file_name, &cycles, &record_size, &invalidation_bytes, + if (!PyArg_ParseTuple(args, "OOsnnnO|n", + &data_blocks_info, &signals, &file_name, &cycles, &record_size, &invalidation_bytes, &group_index, &thread_count)) { return NULL; } else { + + ref = PyImport_ImportModule("asammdf"); + 
InvalidationArray = PyObject_GetAttrString(ref, "InvalidationArray"); + Py_XDECREF(ref); //fptr = fopen(file_name,"rb"); TCHAR *lpFileName = TEXT(file_name); HANDLE hFile; @@ -2283,10 +2300,18 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) if (invalidation_bytes) { signal_and_invalidation_count = signal_count +1; signal_info = (PtrSignalInfo) malloc(sizeof(SignalInfo) * (signal_count + 1)); + if (!signal_info) { + PyErr_SetString(PyExc_ValueError, "Memmory allocation error for signal_info\n\0"); + return NULL; + } } else { signal_and_invalidation_count = signal_count; signal_info = (PtrSignalInfo) malloc(sizeof(SignalInfo) * signal_count); + if (!signal_info) { + PyErr_SetString(PyExc_ValueError, "Memmory allocation error for signal_info\n\0"); + return NULL; + } } for (int i=0; i 5: + if (vv := (perf_counter() - tt)) > 10: print(f"{ss / 1024/1024 / vv:.6f} MB/s {cc=} {vv=}") cc = 0 ss = 0 @@ -7691,6 +7691,7 @@ def _get_scalar( view = f"{channel_dtype.byteorder}i{vals.itemsize}" if dtype(view) != vals.dtype: vals = vals.view(view) + elif channel_type == v4c.CHANNEL_TYPE_VALUE and channel.fast_path is None: channel.fast_path = ( gp_nr, @@ -8392,9 +8393,9 @@ def _yield_selected_signals( fragments = [next(stream) for stream in data_streams] except: break - - if perf_counter() - tt > 120: - x = 1 / 0 + # + # if perf_counter() - tt > 120: + # x = 1 / 0 # prepare the master _master = self.get_master(index, data=fragments[master_index], one_piece=True) diff --git a/src/asammdf/blocks/utils.py b/src/asammdf/blocks/utils.py index f3199f23b..cb639541c 100644 --- a/src/asammdf/blocks/utils.py +++ b/src/asammdf/blocks/utils.py @@ -10,6 +10,7 @@ from io import StringIO import json import logging +import os from pathlib import Path from random import randint import re @@ -26,6 +27,7 @@ import lxml from typing_extensions import Literal, TypedDict +THREAD_COUNT = max(os.cpu_count() - 1, 1) TERMINATED = object() NONE = object() COMPARISON_NAME = 
re.compile(r"(\s*\d+:)?(?P.+)") @@ -1304,6 +1306,10 @@ def get_signal_data_blocks(self, index: int) -> Iterator[SignalDataBlockInfo]: except StopIteration: break + def load_all_data_blocks(self): + for _ in self.get_data_blocks(): + continue + class VirtualChannelGroup: """starting with MDF v4.20 it is possible to use remote masters and column diff --git a/src/asammdf/mdf.py b/src/asammdf/mdf.py index ce19e405d..940071538 100644 --- a/src/asammdf/mdf.py +++ b/src/asammdf/mdf.py @@ -37,9 +37,11 @@ from .blocks import v2_v3_constants as v23c from .blocks import v4_constants as v4c from .blocks.conversion_utils import from_dict +from .blocks.cutils import get_channel_raw_bytes_complete from .blocks.options import FloatInterpolation, IntegerInterpolation from .blocks.source_utils import Source from .blocks.utils import ( + as_non_byte_sized_signed_int, components, csv_bytearray2hex, csv_int2hex, @@ -56,6 +58,7 @@ randomized_string, SUPPORTED_VERSIONS, TERMINATED, + THREAD_COUNT, UINT16_u, UINT64_u, UniqueDB, @@ -3324,6 +3327,305 @@ def select( """ + def validate_blocks(blocks, record_size): + for block in blocks: + if block.original_size % record_size: + return False + + return True + + if record_offset or record_count is not None or (self.version < "4.00" and self._mapped): + return self._select_fallback( + channels, record_offset, raw, copy_master, ignore_value2text_conversions, record_count, validate + ) + + if isinstance(raw, dict): + if "__default__" not in raw: + raise MdfException("The raw argument given as dict must contain the __default__ key") + + __default__ = raw["__default__"] + raw_dict = True + else: + raw_dict = False + + virtual_groups = self.included_channels(channels=channels, minimal=False, skip_master=False) + for virtual_group, groups in virtual_groups.items(): + if len(self._mdf.virtual_groups[virtual_group].groups) > 1: + return self._select_fallback( + channels, record_offset, raw, copy_master, ignore_value2text_conversions, record_count, 
validate + ) + + output_signals = {} + + for virtual_group, groups in virtual_groups.items(): + group_index = virtual_group + grp = self._mdf.groups[group_index] + grp.load_all_data_blocks() + blocks = grp.data_blocks + record_size = grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr + cycles_nr = grp.channel_group.cycles_nr + channel_indexes = groups[group_index] + + pairs = [(group_index, ch_index) for ch_index in channel_indexes] + + master_index = self.masters_db.get(group_index, None) + if master_index is None or grp.record[master_index] is None: + return self._select_fallback( + channels, record_offset, raw, copy_master, ignore_value2text_conversions, record_count, validate + ) + + channel = grp.channels[master_index] + master_dtype, byte_size, byte_offset, _ = grp.record[master_index] + signals = [(byte_offset, byte_size, channel.pos_invalidation_bit)] + + for ch_index in channel_indexes: + channel = grp.channels[ch_index] + + if (info := grp.record[ch_index]) is None: + print("NASOl") + return self._select_fallback( + channels, record_offset, raw, copy_master, ignore_value2text_conversions, record_count, validate + ) + else: + _, byte_size, byte_offset, _ = info + signals.append((byte_offset, byte_size, channel.pos_invalidation_bit)) + + raw_and_invalidation = get_channel_raw_bytes_complete( + blocks, + signals, + self._mapped_file.name, + cycles_nr, + record_size, + grp.channel_group.invalidation_bytes_nr, + THREAD_COUNT, + ) + master_bytes, _ = raw_and_invalidation[0] + raw_and_invalidation = raw_and_invalidation[1:] + + # prepare the master + master = np.frombuffer(master_bytes, dtype=master_dtype) + + # fake invalidation_bytes + invalidation_bytes = bytes(grp.channel_group.invalidation_bytes_nr * cycles_nr) + + for pair, (raw_data, invalidation_bits) in zip(pairs, raw_and_invalidation): + ch_index = pair[-1] + channel = grp.channels[ch_index] + channel_dtype, byte_size, byte_offset, bit_offset = grp.record[ch_index] + vals = 
np.frombuffer(raw_data, dtype=channel_dtype) + + data_type = channel.data_type + + if not channel.standard_C_size: + size = byte_size + + if channel_dtype.byteorder == "=" and data_type in ( + v4c.DATA_TYPE_SIGNED_MOTOROLA, + v4c.DATA_TYPE_UNSIGNED_MOTOROLA, + ): + view = np.dtype(f">u{vals.itemsize}") + else: + view = np.dtype(f"{channel_dtype.byteorder}u{vals.itemsize}") + + if view != vals.dtype: + vals = vals.view(view) + + if bit_offset: + vals >>= bit_offset + + if channel.bit_count != size * 8: + if data_type in v4c.SIGNED_INT: + vals = as_non_byte_sized_signed_int(vals, channel.bit_count) + else: + mask = (1 << channel.bit_count) - 1 + vals &= mask + elif data_type in v4c.SIGNED_INT: + view = f"{channel_dtype.byteorder}i{vals.itemsize}" + if np.dtype(view) != vals.dtype: + vals = vals.view(view) + + conversion = channel.conversion + unit = (conversion and conversion.unit) or channel.unit + + source = channel.source + + if source: + source = Source.from_source(source) + else: + cg_source = grp.channel_group.acq_source + if cg_source: + source = Source.from_source(cg_source) + else: + source = None + + master_metadata = self._master_channel_metadata.get(group_index, None) + + output_signals[pair] = Signal( + samples=vals, + timestamps=master, + unit=unit, + name=channel.name, + comment=channel.comment, + conversion=conversion, + raw=True, + master_metadata=master_metadata, + attachment=None, + source=source, + display_names=channel.display_names, + bit_count=channel.bit_count, + flags=Signal.Flags.no_flags, + invalidation_bits=invalidation_bits, + encoding=None, + group_index=group_index, + channel_index=ch_index, + ) + + indexes = [] + + for item in channels: + if not isinstance(item, (list, tuple)): + item = [item] + indexes.append(self._validate_channel_selection(*item)) + + signals = [output_signals[pair] for pair in indexes] + + if copy_master: + for signal in signals: + signal.timestamps = signal.timestamps.copy() + + for signal in signals: + if 
(raw_dict and not raw.get(signal.name, __default__)) or (not raw_dict and not raw): + conversion = signal.conversion + if conversion: + samples = conversion.convert( + signal.samples, ignore_value2text_conversions=ignore_value2text_conversions + ) + signal.samples = samples + + signal.raw = False + signal.conversion = None + if signal.samples.dtype.kind == "S": + signal.encoding = "utf-8" if self.version >= "4.00" else "latin-1" + + if validate: + signals = [sig.validate(copy=False) for sig in signals] + + for signal, channel in zip(signals, channels): + if isinstance(channel, str): + signal.name = channel + else: + name = channel[0] + if name is not None: + signal.name = name + + unique = set() + for i, signal in enumerate(signals): + obj_id = id(signal) + if id(signal) in unique: + signals[i] = signal.copy() + unique.add(obj_id) + + return signals + + def _select_fallback( + self, + channels: ChannelsType, + record_offset: int = 0, + raw: bool | dict[str, bool] = False, + copy_master: bool = True, + ignore_value2text_conversions: bool = False, + record_count: int | None = None, + validate: bool = False, + ) -> list[Signal]: + """retrieve the channels listed in *channels* argument as *Signal* + objects + + .. note:: the *dataframe* argument was removed in version 5.8.0 + use the ``to_dataframe`` method instead + + Parameters + ---------- + channels : list + list of items to be filtered; each item can be : + + * a channel name string + * (channel name, group index, channel index) list or tuple + * (channel name, group index) list or tuple + * (None, group index, channel index) list or tuple + + record_offset : int + record number offset; optimization to get the last part of signal samples + raw : bool | dict[str, bool] + get raw channel samples; default *False* + + .. versionchanged:: 8.0.0 + + provide individual raw mode based on a dict. If the parameters is given + as dict then it must contain the key ``__default__`` with the default raw value. 
The dict keys + are the channel names and the values are the boolean raw values for each channel. + + copy_master : bool + option to get a new timestamps array for each selected Signal or to + use a shared array for channels of the same channel group; default *True* + ignore_value2text_conversions (False) : bool + valid only for the channels that have value to text conversions and + if *raw=False*. If this is True then the raw numeric values will be + used, and the conversion will not be applied. + + .. versionchanged:: 5.8.0 + + validate (False) : bool + consider the invalidation bits + + .. versionadded:: 5.16.0 + + Returns + ------- + signals : list + list of *Signal* objects based on the input channel list + + Examples + -------- + >>> from asammdf import MDF, Signal + >>> import numpy as np + >>> t = np.arange(5) + >>> s = np.ones(5) + >>> mdf = MDF() + >>> for i in range(4): + ... sigs = [Signal(s*(i*10+j), t, name='SIG') for j in range(1,4)] + ... mdf.append(sigs) + ... + >>> # select SIG group 0 default index 1 default, SIG group 3 index 1, SIG group 2 index 1 default and channel index 2 from group 1 + ... 
+ >>> mdf.select(['SIG', ('SIG', 3, 1), ['SIG', 2], (None, 1, 2)]) + [ + , + , + , + ] + + """ + if isinstance(raw, dict): if "__default__" not in raw: raise MdfException("The raw argument given as dict must contain the __default__ key") From 56630b0ced271a10e05e49cf23ee0df7cafbf6af Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Wed, 8 Jan 2025 13:14:06 +0200 Subject: [PATCH 44/60] use Fragment class instead of tuple --- src/asammdf/blocks/mdf_v4.py | 147 ++++++++++++++++++++++++----------- src/asammdf/blocks/utils.py | 24 ++++++ src/asammdf/mdf.py | 3 - 3 files changed, 126 insertions(+), 48 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 1e2f861f7..67e0e815d 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -106,6 +106,7 @@ extract_encryption_information, extract_xml_comment, fmt_to_datatype_v4, + Fragment, get_fmt_v4, get_text_v4, Group, @@ -165,7 +166,6 @@ # 100 extra steps for the sorting, 1 step after sorting and 1 step at finish SORT_STEPS = 102 -DATA_IS_CHANNEL_BYTES = [-2, -2] logger = logging.getLogger("asammdf") @@ -655,7 +655,7 @@ def _read(self, mapped: bool = False, progress=None) -> None: inval_total_size=inval_total_size, record_size=record_size, ) - data_blocks = [] + data_blocks = list(data_blocks_info) # load the info blocks directly here uses_ld = self._uses_ld( address=address, stream=stream, @@ -1427,9 +1427,9 @@ def _load_data( if not samples_size: if rm: - yield b"", offset, _count, b"" + yield Fragment(b"", offset, _count, b"") else: - yield b"", offset, _count, None + yield Fragment(b"", offset, _count, None) else: if group.read_split_count: split_size = group.read_split_count * samples_size @@ -1629,14 +1629,17 @@ def _load_data( if rm and invalidation_size: __data = buffer[:record_count] _count = len(__data) // samples_size - yield __data, offset // samples_size, _count, invalidation_data_[ - :invalidation_record_count - ] + yield Fragment( + __data, + offset // 
samples_size, + _count, + invalidation_data_[:invalidation_record_count], + ) invalidation_record_count -= len(invalidation_data_) else: __data = buffer[:record_count] _count = len(__data) // samples_size - yield __data, offset // samples_size, _count, None + yield Fragment(__data, offset // samples_size, _count, None) has_yielded = True record_count -= split_size if record_count <= 0: @@ -1645,10 +1648,10 @@ def _load_data( else: if rm and invalidation_size: _count = split_size // samples_size - yield buffer, offset // samples_size, _count, invalidation_data_ + yield Fragment(buffer, offset // samples_size, _count, invalidation_data_) else: _count = split_size // samples_size - yield buffer, offset // samples_size, _count, None + yield Fragment(buffer, offset // samples_size, _count, None) has_yielded = True else: @@ -1660,14 +1663,17 @@ def _load_data( if record_count is not None: if rm and invalidation_size: - yield buffer[:record_count], offset // samples_size, _count, invalidation_data_[ - :invalidation_record_count - ] + yield Fragment( + buffer[:record_count], + offset // samples_size, + _count, + invalidation_data_[:invalidation_record_count], + ) invalidation_record_count -= len(invalidation_data_) else: __data = buffer[:record_count] _count = len(__data) // samples_size - yield __data, offset // samples_size, _count, None + yield Fragment(__data, offset // samples_size, _count, None) has_yielded = True record_count -= split_size if record_count <= 0: @@ -1676,10 +1682,10 @@ def _load_data( else: if rm and invalidation_size: _count = split_size // samples_size - yield buffer, offset // samples_size, _count, invalidation_data_ + yield Fragment(buffer, offset // samples_size, _count, invalidation_data_) else: _count = split_size // samples_size - yield buffer, offset // samples_size, _count, None + yield Fragment(buffer, offset // samples_size, _count, None) has_yielded = True offset += split_size @@ -1720,28 +1726,30 @@ def _load_data( if rm and 
invalidation_size: __data = data_[:record_count] _count = len(__data) // samples_size - yield __data, offset // samples_size, _count, invalidation_data_[:invalidation_record_count] + yield Fragment( + __data, offset // samples_size, _count, invalidation_data_[:invalidation_record_count] + ) invalidation_record_count -= len(invalidation_data_) else: __data = data_[:record_count] _count = len(__data) // samples_size - yield __data, offset // samples_size, _count, None + yield Fragment(__data, offset // samples_size, _count, None) has_yielded = True record_count -= len(data_) else: if rm and invalidation_size: _count = len(data_) // samples_size - yield data_, offset // samples_size, _count, invalidation_data_ + yield Fragment(data_, offset // samples_size, _count, invalidation_data_) else: _count = len(data_) // samples_size - yield data_, offset // samples_size, _count, None + yield Fragment(data_, offset // samples_size, _count, None) has_yielded = True if not has_yielded: if rm and invalidation_size: - yield b"", 0, 0, b"" + yield Fragment(b"", 0, 0, b"") else: - yield b"", 0, 0, None + yield Fragment(b"", 0, 0, None) def _prepare_record(self, group: Group) -> list: """compute record @@ -1773,7 +1781,7 @@ def _prepare_record(self, group: Group) -> list: ch_type = new_ch.channel_type dependency_list = group.channel_dependencies[idx] - if ch_type not in v4c.VIRTUAL_TYPES and not dependency_list: + if ch_type not in v4c.VIRTUAL_TYPES: # adjust size to 1, 2, 4 or 8 bytes size = bit_offset + bit_count @@ -1796,7 +1804,11 @@ def _prepare_record(self, group: Group) -> list: if not new_ch.dtype_fmt: new_ch.dtype_fmt = dtype(get_fmt_v4(data_type, size, ch_type)) - if bit_offset or (new_ch.dtype_fmt.kind in "ui" and size < 64 and size not in (8, 16, 32)): + if ( + bit_offset + or dependency_list + or (new_ch.dtype_fmt.kind in "ui" and size < 64 and size not in (8, 16, 32)) + ): new_ch.standard_C_size = False record.append( @@ -2181,6 +2193,7 @@ def _get_data_blocks_info( if 
id_string == block_type: size = block_len - 24 if size: + size = min(size, total_size) address = address + COMMON_SIZE # split the DTBLOCK into chucks of up to 32MB @@ -2635,7 +2648,12 @@ def get_invalidation_bits( """ group = self.groups[group_index] - data_bytes, offset, _count, invalidation_bytes = fragment + data_bytes, offset, _count, invalidation_bytes = ( + fragment.data, + fragment.record_offset, + fragment.record_count, + fragment.invalidation_data, + ) if invalidation_bytes is None: invalidation_bytes_nr = group.channel_group.invalidation_bytes_nr @@ -7006,7 +7024,7 @@ def _get_structure( count = 0 for fragment in data: - bts = fragment[0] + bts = fragment.data buffer = get_channel_raw_bytes(bts, record_size, byte_offset, _dtype.itemsize) @@ -7225,7 +7243,12 @@ def _get_array( arrays = [] types = [] - data_bytes, offset, _count, invalidation_bytes = fragment + data_bytes, offset, _count, invalidation_bytes = ( + fragment.data, + fragment.record_offset, + fragment.record_count, + fragment.invalidation_data, + ) cycles = len(data_bytes) // samples_size @@ -7477,10 +7500,9 @@ def _fast_scalar_path( # channel_type, # bit_count, dtype, - data, + fragment, ): - data_bytes, *rec_key, invalidation_bytes = data - if rec_key != DATA_IS_CHANNEL_BYTES: + if fragment.is_record: buffer = get_channel_raw_bytes( data_bytes, record_size, @@ -7488,12 +7510,12 @@ def _fast_scalar_path( byte_size, ) else: - buffer = data_bytes + buffer = fragment.data vals = frombuffer(buffer, dtype=dtype) if pos_invalidation_bit >= 0: - invalidation_bits = self.get_invalidation_bits(gp_nr, pos_invalidation_bit, data) + invalidation_bits = self.get_invalidation_bits(gp_nr, pos_invalidation_bit, fragment) else: invalidation_bits = None @@ -7559,7 +7581,12 @@ def _get_scalar( data = (data,) for fragment in data: - data_bytes, offset, _count, invalidation_bytes = fragment + data_bytes, offset, _count, invalidation_bytes = ( + fragment.data, + fragment.record_offset, + fragment.record_count, + 
fragment.invalidation_data, + ) offset = offset // record_size vals = arange(len(data_bytes) // record_size, dtype=ch_dtype) @@ -7643,7 +7670,7 @@ def _get_scalar( if one_piece: fragment = data - data_bytes, rec_offset, rec_count, _ = fragment + data_bytes, rec_offset, rec_count = fragment.data, fragment.record_offset, fragment.record_count info = grp.record[ch_nr] @@ -7652,7 +7679,7 @@ def _get_scalar( if ch_nr == 0 and len(grp.channels) == 1 and channel.dtype_fmt.itemsize == record_size: buffer = bytearray(data_bytes) else: - if (rec_offset, rec_count) != (-2, -2): + if fragment.is_record: buffer = get_channel_raw_bytes( data_bytes, record_size + channel_group.invalidation_bytes_nr, @@ -7735,7 +7762,12 @@ def _get_scalar( if info is None: for count, fragment in enumerate(data, 1): - data_bytes, offset, _count, invalidation_bytes = fragment + data_bytes, offset, _count, invalidation_bytes = ( + fragment.data, + fragment.record_offset, + fragment.record_count, + fragment.invalidation_data, + ) vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr) @@ -7758,7 +7790,7 @@ def _get_scalar( count = 0 for count, fragment in enumerate(data, 1): - data_bytes = fragment[0] + data_bytes = fragment.data if ch_nr == 0 and len(grp.channels) == 1 and channel.dtype_fmt.itemsize == record_size: buffer.append(bytearray(data_bytes)) @@ -8416,14 +8448,14 @@ def _yield_selected_signals( if 1 and len(channels) >= 100: # prepare the invalidation bytes for this group and fragment invalidation_bytes = get_channel_raw_bytes( - fragment[0], + fragment.data, grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, grp.channel_group.samples_byte_nr, grp.channel_group.invalidation_bytes_nr, ) channels_raw_data = get_channel_raw_bytes_parallel( - fragment[0], + fragment.data, grp.channel_group.samples_byte_nr + grp.channel_group.invalidation_bytes_nr, group_info[group_index], THREAD_COUNT, @@ -8434,7 +8466,17 @@ def _yield_selected_signals( signal = self.get( 
group=group_index, index=channel_index, - data=(raw_data, -2, -2, invalidation_bytes) if raw_data else fragment, + data=( + Fragment( + data=raw_data, + record_offset=fragment.record_offset, + record_count=fragment.record_count, + invalidation_data=invalidation_bytes, + is_record=False, + ) + if raw_data + else fragment + ), raw=True, ignore_invalidation_bits=True, samples_only=False, @@ -8448,7 +8490,17 @@ def _yield_selected_signals( signal, invalidation_bits = self.get( group=group_index, index=channel_index, - data=(raw_data, -2, -2, invalidation_bytes) if raw_data else fragment, + data=( + Fragment( + data=raw_data, + record_offset=fragment.record_offset, + record_count=fragment.record_count, + invalidation_data=invalidation_bytes, + is_record=False, + ) + if raw_data + else fragment + ), raw=True, ignore_invalidation_bits=True, samples_only=True, @@ -8588,7 +8640,12 @@ def get_master( fragment = data if fragment: - data_bytes, offset, _count, invalidation_bytes = fragment + data_bytes, offset, _count, invalidation_bytes = ( + fragment.data, + fragment.record_offset, + fragment.record_count, + fragment.invalidation_data, + ) cycles_nr = len(data_bytes) // record_size if record_size else 0 else: offset = 0 @@ -8637,7 +8694,7 @@ def get_master( else: data = (fragment,) - buffer = bytearray().join([fragment[0] for fragment in data]) + buffer = bytearray().join([fragment.data for fragment in data]) t = frombuffer(buffer, dtype=time_ch.dtype_fmt) @@ -8645,7 +8702,7 @@ def get_master( dtype_, byte_size, byte_offset, bit_offset = group.record[time_ch_nr] if one_piece: - data_bytes = data[0] + data_bytes = data.data buffer = get_channel_raw_bytes( data_bytes, @@ -8670,7 +8727,7 @@ def get_master( buffer = bytearray().join( [ get_channel_raw_bytes( - fragment[0], + fragment.data, record_size, byte_offset, byte_size, diff --git a/src/asammdf/blocks/utils.py b/src/asammdf/blocks/utils.py index cb639541c..c544e9ff9 100644 --- a/src/asammdf/blocks/utils.py +++ 
b/src/asammdf/blocks/utils.py @@ -1514,6 +1514,30 @@ def __repr__(self) -> str: ) +class Fragment: + def __init__( + self, + data, + record_offset, + record_count, + invalidation_data, + is_record=True, + ) -> None: + self.data = data + self.record_count = record_count + self.record_offset = record_offset + self.invalidation_data = invalidation_data + self.is_record = is_record + + def __repr__(self) -> str: + return ( + f"FragmentInfo({len(self.data)} bytes, " + f"record_offset={self.record_offset}, " + f"record_count={self.record_count}, " + f"is_record={self.is_record})" + ) + + class InvalidationBlockInfo(DataBlockInfo): __slots__ = ("all_valid",) diff --git a/src/asammdf/mdf.py b/src/asammdf/mdf.py index 940071538..aea7b8b04 100644 --- a/src/asammdf/mdf.py +++ b/src/asammdf/mdf.py @@ -3405,9 +3405,6 @@ def validate_blocks(blocks, record_size): # prepare the master master = np.frombuffer(master_bytes, dtype=master_dtype) - # fake invalidation_bytes - invalidation_bytes = bytes(grp.channel_group.invalidation_bytes_nr * cycles_nr) - for pair, (raw_data, invalidation_bits) in zip(pairs, raw_and_invalidation): ch_index = pair[-1] channel = grp.channels[ch_index] From 6adceeb977c8a3752e518e10d6899a8e21b1d886 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Fri, 10 Jan 2025 12:44:21 +0200 Subject: [PATCH 45/60] next --- src/asammdf/blocks/cutils.c | 62 +++++++++++++++++++----------------- src/asammdf/blocks/mdf_v4.py | 41 ++++++++++++++++-------- src/asammdf/blocks/utils.py | 2 +- 3 files changed, 62 insertions(+), 43 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index a553c619f..8c83cc78b 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -2052,7 +2052,13 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) t3 += end - start; if (thread_info->stop) break; - //printf("Thr %d processing\n", thread_info->idx); + /* printf("Thr %d processing\n", thread_info->idx); + printf("Block type=%d\n", 
thread_info->block_info->block_type); + printf("Block limit=%d\n", thread_info->block_info->block_limit); + printf("Block original_size%d\n", thread_info->block_info->original_size); + printf("Block compressed_size=%d\n", thread_info->block_info->compressed_size); + printf("Block param=%d\n", thread_info->block_info->param); + printf("Block record_offset=%d\n", thread_info->block_info->record_offset); */ #else pthread_mutex_lock(&thread_info->block_ready_lock); @@ -2066,9 +2072,6 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) block_type = thread_info->block_info->block_type; record_offset = thread_info->block_info->record_offset; - cols = param; - lines = original_size / cols; - count = original_size / record_size; if (thread_info->block_info->block_limit >= 0) { @@ -2104,6 +2107,9 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) // reverse transposition if (block_type == 2) { + cols = param; + lines = original_size / cols; + if (current_out_size < original_size) { //printf("\tThr %d new trtrtrptr\n", thread_info->idx); if (pUncompTr) free(pUncompTr); @@ -2601,8 +2607,6 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) free(thread_info); } - //fclose(fptr); - PyObject *inv, *inv_array, *origin; out = PyTuple_New(signal_count); @@ -2621,34 +2625,34 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) for (int i=0; i< invalidation_bytes * 8; i++) cache[i] = NULL; for (int i=0; i tuple[int, list[tuple[int, int]] | None, dtype | None]: filter_channels = self.use_load_filter use_display_names = self._use_display_names @@ -842,7 +843,7 @@ def _read_channels( unique_names = UniqueDB() - if channel_composition: + if parent_channel: composition = [] composition_channels = [] @@ -907,7 +908,7 @@ def _read_channels( display_names = {_name.split(path_separator, 1)[0]: val for _name, val in display_names.items()} if ( - channel_composition + parent_channel or channel_type in v4c.MASTER_TYPES or 
name in self.load_filter or (use_display_names and any(dsp_name in self.load_filter for dsp_name in display_names)) @@ -950,7 +951,7 @@ def _read_channels( stream, dg_cntr, ch_cntr, - False, + None, mapped=mapped, ) @@ -991,7 +992,7 @@ def _read_channels( self._ch_map[ch_addr] = entry channels.append(channel) - if channel_composition: + if parent_channel: composition.append(entry) composition_channels.append(channel) @@ -1040,7 +1041,7 @@ def _read_channels( stream, dg_cntr, ch_cntr, - True, + channel, mapped=mapped, ) dependencies[index] = ret_composition @@ -1084,10 +1085,15 @@ def _read_channels( stream, dg_cntr, ch_cntr, - True, + channel, mapped=mapped, ) + if channel.name == 'QUGIAUGCKRWYPJEVRSOGPNGXXMAGIUZBFPJTWGUJRVQEOKWBCQIFZZCZIXHMVZTVRZEGKWWXJJQJZFUZOCVYHSA': + x = 1 + + channel.dtype_fmt = ret_composition_dtype + ca_cnt = len(dependencies[index]) if ret_composition: dependencies[index].extend(ret_composition) @@ -1209,11 +1215,20 @@ def _get_name_with_indices(ch_name: str, ch_parent_name: str, indices: list[int] # go to next channel of the current channel group ch_addr = channel.next_ch_addr - if channel_composition: - composition_channels.sort() - composition_dtype = dtype( - [(unique_names.get_unique_name(channel.name), channel.dtype_fmt) for channel in composition_channels] - ) + if parent_channel: + composition_channels.sort(key = lambda x: x.byte_offset) + padding = 0 + dtype_fields = [] + offset = parent_channel.byte_offset + + for comp_channel in composition_channels: + if (delta := (comp_channel.byte_offset - offset)) > 0: + dtype_fields.append((f'__padding_{padding}__', f'V{delta}')) + padding += 1 + dtype_fields.append((unique_names.get_unique_name(comp_channel.name), comp_channel.dtype_fmt)) + offset = comp_channel.byte_offset + comp_channel.dtype_fmt.itemsize + + composition_dtype = dtype(dtype_fields) else: composition = None @@ -7504,7 +7519,7 @@ def _fast_scalar_path( ): if fragment.is_record: buffer = get_channel_raw_bytes( - data_bytes, 
+ fragment.data, record_size, byte_offset, byte_size, diff --git a/src/asammdf/blocks/utils.py b/src/asammdf/blocks/utils.py index c544e9ff9..52543f00b 100644 --- a/src/asammdf/blocks/utils.py +++ b/src/asammdf/blocks/utils.py @@ -539,7 +539,7 @@ def get_fmt_v4(data_type: int, size: int, channel_type: int = v4c.CHANNEL_TYPE_V """ if data_type in v4c.NON_SCALAR_TYPES: - size = size // 8 + size = size // 8 or 1 if data_type in ( v4c.DATA_TYPE_BYTEARRAY, From e192767a9dabb8201f40f0bdb2ba64f7edcbd06f Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 13 Jan 2025 08:25:37 +0200 Subject: [PATCH 46/60] linux fix --- src/asammdf/blocks/cutils.c | 20 ++++++++++++++++---- src/asammdf/blocks/mdf_v4.py | 8 ++------ 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 8c83cc78b..36da3bf9c 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -18,6 +18,7 @@ #else #include #include +#include #define Sleep(x) usleep((int)(1000 * (x))) #define FSEEK64(file, address, whence) fseeko((file), (address), (whence)) #define FTELL64(file) ftello(file) @@ -2107,8 +2108,8 @@ void * get_channel_raw_bytes_complete_C(void *lpParam ) // reverse transposition if (block_type == 2) { - cols = param; - lines = original_size / cols; + cols = param; + lines = original_size / cols; if (current_out_size < original_size) { //printf("\tThr %d new trtrtrptr\n", thread_info->idx); @@ -2218,6 +2219,8 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) InvalidationArray = PyObject_GetAttrString(ref, "InvalidationArray"); Py_XDECREF(ref); //fptr = fopen(file_name,"rb"); + +#if defined(_WIN32) TCHAR *lpFileName = TEXT(file_name); HANDLE hFile; HANDLE hMap; @@ -2274,7 +2277,6 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) return 1; } -#if defined(_WIN32) HANDLE *hThreads, *block_ready, *bytes_ready; DWORD *dwThreadIdArray; hThreads = (HANDLE *) 
malloc(sizeof(HANDLE) * thread_count); @@ -2282,6 +2284,14 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) block_ready = (HANDLE *) malloc(sizeof(HANDLE) * thread_count); bytes_ready = (HANDLE *) malloc(sizeof(HANDLE) * thread_count); #else + int fdin = open(file_name, O_RDONLY); + struct stat statbuf; + uint8_t * lpBasePtr; + + fstat (fdin, &statbuf); + + lpBasePtr = mmap (0, statbuf.st_size, PROT_READ, MAP_SHARED, fdin, 0)); + pthread_t *dwThreadIdArray = (pthread_t *) malloc(sizeof(pthread_t) * thread_count); pthread_mutex_t *bytes_ready_locks, *block_ready_locks; // Declare mutex @@ -2582,11 +2592,11 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) if (position == thread_count) position = 0; } +#if defined(_WIN32) UnmapViewOfFile(lpBasePtr); CloseHandle(hMap); CloseHandle(hFile); -#if defined(_WIN32) WaitForMultipleObjects(thread_count, hThreads, true, INFINITE); for (int i=0; i< thread_count; i++) { CloseHandle(hThreads[i]); @@ -2594,6 +2604,8 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) CloseHandle(bytes_ready[i]); } #else + munmap(lpBasePtr, statbuf.st_size); + close(fdin); for (int i=0; i< thread_count; i++) { pthread_join(dwThreadIdArray[i], NULL); } diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index bdf3aa50c..256d5962c 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -832,7 +832,6 @@ def _read_channels( ch_cntr: int, parent_channel: Channel | None = None, mapped: bool = False, - ) -> tuple[int, list[tuple[int, int]] | None, dtype | None]: filter_channels = self.use_load_filter use_display_names = self._use_display_names @@ -1089,9 +1088,6 @@ def _read_channels( mapped=mapped, ) - if channel.name == 'QUGIAUGCKRWYPJEVRSOGPNGXXMAGIUZBFPJTWGUJRVQEOKWBCQIFZZCZIXHMVZTVRZEGKWWXJJQJZFUZOCVYHSA': - x = 1 - channel.dtype_fmt = ret_composition_dtype ca_cnt = len(dependencies[index]) @@ -1216,14 
+1212,14 @@ def _get_name_with_indices(ch_name: str, ch_parent_name: str, indices: list[int] ch_addr = channel.next_ch_addr if parent_channel: - composition_channels.sort(key = lambda x: x.byte_offset) + composition_channels.sort(key=lambda x: x.byte_offset) padding = 0 dtype_fields = [] offset = parent_channel.byte_offset for comp_channel in composition_channels: if (delta := (comp_channel.byte_offset - offset)) > 0: - dtype_fields.append((f'__padding_{padding}__', f'V{delta}')) + dtype_fields.append((f"__padding_{padding}__", f"V{delta}")) padding += 1 dtype_fields.append((unique_names.get_unique_name(comp_channel.name), comp_channel.dtype_fmt)) offset = comp_channel.byte_offset + comp_channel.dtype_fmt.itemsize From ff79e38ef5537b96ef43771a2320ca37eafa22ac Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 13 Jan 2025 08:32:05 +0200 Subject: [PATCH 47/60] Py_NewRef not available in py3.9 --- src/asammdf/blocks/cutils.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index 36da3bf9c..dd9527ffe 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -2651,7 +2651,8 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) if (!inv) return NULL; origin = PyTuple_New(2); if (!origin) return NULL; - PyTuple_SetItem(origin, 0, Py_NewRef(group_index)); + Py_INCREF(group_index); + PyTuple_SetItem(origin, 0, group_index); PyTuple_SetItem(origin, 1, PyLong_FromLong(signal_info[i].invalidation_bit_position)); inv_array = PyObject_CallFunction( InvalidationArray, @@ -2662,8 +2663,9 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) cache[signal_info[i].invalidation_bit_position] = inv_array; Py_XDECREF(origin); } - - PyTuple_SetItem(ref, 1, Py_NewRef(cache[signal_info[i].invalidation_bit_position])); + + Py_INCREF(cache[signal_info[i].invalidation_bit_position]); + PyTuple_SetItem(ref, 1, 
cache[signal_info[i].invalidation_bit_position]); } } else { From 8b26ce3b63d332defb5ca647927958cc1b023616 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 13 Jan 2025 08:36:15 +0200 Subject: [PATCH 48/60] linux imports --- src/asammdf/blocks/cutils.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index dd9527ffe..c9a051096 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -18,7 +18,9 @@ #else #include #include +#include #include +#include #define Sleep(x) usleep((int)(1000 * (x))) #define FSEEK64(file, address, whence) fseeko((file), (address), (whence)) #define FTELL64(file) ftello(file) From dde32998d784713ad28a023ac6c083c215135ee4 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 13 Jan 2025 08:38:38 +0200 Subject: [PATCH 49/60] double parenthesis --- src/asammdf/blocks/cutils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/asammdf/blocks/cutils.c b/src/asammdf/blocks/cutils.c index c9a051096..f86688e36 100644 --- a/src/asammdf/blocks/cutils.c +++ b/src/asammdf/blocks/cutils.c @@ -2292,7 +2292,7 @@ static PyObject *get_channel_raw_bytes_complete(PyObject *self, PyObject *args) fstat (fdin, &statbuf); - lpBasePtr = mmap (0, statbuf.st_size, PROT_READ, MAP_SHARED, fdin, 0)); + lpBasePtr = mmap (0, statbuf.st_size, PROT_READ, MAP_SHARED, fdin, 0); pthread_t *dwThreadIdArray = (pthread_t *) malloc(sizeof(pthread_t) * thread_count); From 9f0185cfece0c39870c29109d2e4404b5e237c29 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 13 Jan 2025 08:42:51 +0200 Subject: [PATCH 50/60] use the new Fragment objects --- src/asammdf/blocks/mdf_v4.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index 256d5962c..e8c185893 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -9507,7 +9507,7 @@ def save( data = self._load_data(gp) if chunks == 1: - 
data_, _1, _2, inval_ = next(data) + data_, inval_ = data.data, data.invalidation_data if self.version >= "4.20" and gp.uses_ld: if compression: if gp.channel_group.samples_byte_nr > 1: From 930305b65bb7439a8df32a7d4347ca4331c17956 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 13 Jan 2025 08:47:47 +0200 Subject: [PATCH 51/60] more fragment usage --- src/asammdf/blocks/mdf_v4.py | 11 ++++++----- src/asammdf/version.py | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/asammdf/blocks/mdf_v4.py b/src/asammdf/blocks/mdf_v4.py index e8c185893..2b7e7b19f 100644 --- a/src/asammdf/blocks/mdf_v4.py +++ b/src/asammdf/blocks/mdf_v4.py @@ -9507,7 +9507,8 @@ def save( data = self._load_data(gp) if chunks == 1: - data_, inval_ = data.data, data.invalidation_data + fragment = next(data) + data_, inval_ = fragment.data, fragment.invalidation_data if self.version >= "4.20" and gp.uses_ld: if compression: if gp.channel_group.samples_byte_nr > 1: @@ -9610,7 +9611,8 @@ def save( dv_addr = [] di_addr = [] block_size = 0 - for i, (data_, _1, _2, inval_) in enumerate(data): + for i, fragment in enumerate(data): + data_, inval_ = fragment.data, fragment.invalidation_data if i == 0: block_size = len(data_) if compression: @@ -9698,9 +9700,8 @@ def save( } dl_block = DataList(**kwargs) - for i, data__ in enumerate(data): - - data_ = data__[0] + for i, fragment in enumerate(data): + data_ = fragment.data if compression and self.version >= "4.10": if compression == 1: diff --git a/src/asammdf/version.py b/src/asammdf/version.py index 7f42128f4..4520a7464 100644 --- a/src/asammdf/version.py +++ b/src/asammdf/version.py @@ -1,3 +1,3 @@ """ asammdf version module """ -__version__ = "8.1.0.dev3" +__version__ = "8.1.0.dev4" From 39238d09c6fc9e2b1fac9ac8242427af4d419692 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 13 Jan 2025 08:54:40 +0200 Subject: [PATCH 52/60] only works for mapped file --- src/asammdf/mdf.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 
deletion(-) diff --git a/src/asammdf/mdf.py b/src/asammdf/mdf.py index aea7b8b04..c76e08d57 100644 --- a/src/asammdf/mdf.py +++ b/src/asammdf/mdf.py @@ -3334,7 +3334,12 @@ def validate_blocks(blocks, record_size): return True - if record_offset or record_count is not None or (self.version < "4.00" and self._mapped): + if ( + not self._mapped_file + or record_offset + or record_count is not None + or (self.version < "4.00" and self._mapped) + ): return self._select_fallback( channels, record_offset, raw, copy_master, ignore_value2text_conversions, record_count, validate ) From 120b4f2d427b5e780ce71de589bf809b895e9834 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Mon, 13 Jan 2025 08:58:52 +0200 Subject: [PATCH 53/60] MDF v4 only --- src/asammdf/mdf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/asammdf/mdf.py b/src/asammdf/mdf.py index c76e08d57..2c4f446a6 100644 --- a/src/asammdf/mdf.py +++ b/src/asammdf/mdf.py @@ -3335,10 +3335,10 @@ def validate_blocks(blocks, record_size): return True if ( - not self._mapped_file + self.version < "4.00" + or not self._mapped_file or record_offset or record_count is not None - or (self.version < "4.00" and self._mapped) ): return self._select_fallback( channels, record_offset, raw, copy_master, ignore_value2text_conversions, record_count, validate ) From d5bd27d81b9fdafd38e6b4d85da117de2a1f67f2 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 14 Jan 2025 09:56:11 +0200 Subject: [PATCH 54/60] add libdeflate license notice --- LICENSE | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/LICENSE b/LICENSE index a8f6ca6dc..1e7c9849f 100644 --- a/LICENSE +++ b/LICENSE @@ -163,3 +163,34 @@ whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library.
+ +-------------------------------------------------------------------------------- + +3rdparty dependency libdeflate is statically linked. +libdeflate has the following license: + + +Copyright 2016 Eric Biggers +Copyright 2024 Google LLC + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation files +(the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- \ No newline at end of file From 0b0d7f36b0d906e1e2f477b792c2b65c11926b5f Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 14 Jan 2025 12:59:45 +0200 Subject: [PATCH 55/60] prevent \n avalanche --- src/asammdf/gui/widgets/file.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/asammdf/gui/widgets/file.py b/src/asammdf/gui/widgets/file.py index 283649932..8abce53ee 100644 --- a/src/asammdf/gui/widgets/file.py +++ b/src/asammdf/gui/widgets/file.py @@ -1051,7 +1051,7 @@ def to_config(self): config["windows"] = windows config["active_window"] = current_window.windowTitle() if current_window else "" config["functions"] = self.functions - config["global_variables"] = self.global_variables + config["global_variables"] = "\n".join([line for line in self.global_variables.splitlines() if line]) return config @@ -1268,6 +1268,7 @@ def load_channel_list(self, event=None, file_name=None, manually=False, show_pro self.functions.update(info.get("functions", {})) self.global_variables = f'{self.global_variables}\n{info.get("global_variables", "")}' + self.global_variables = '\n'.join([line for line in self.global_variables.splitlines() if line]) if channels: iterator = QtWidgets.QTreeWidgetItemIterator(self.channels_tree) From 910322570f4e4b7391cf2007d27328fde1bfa8c6 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 14 Jan 2025 13:51:35 +0200 Subject: [PATCH 56/60] disable get_channel_raw_bytes_complete usage for now --- src/asammdf/mdf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/asammdf/mdf.py b/src/asammdf/mdf.py index 2c4f446a6..2f8cae7a7 100644 --- a/src/asammdf/mdf.py +++ b/src/asammdf/mdf.py @@ -3339,6 +3339,7 @@ def validate_blocks(blocks, record_size): or not self._mapped_file or record_offset or record_count is not None + or True # disable for now ): return self._select_fallback( channels, record_offset, raw, copy_master, 
ignore_value2text_conversions, record_count, validate From b8873d20bc2c119399483247aff3adf67a49f851 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 14 Jan 2025 14:08:15 +0200 Subject: [PATCH 57/60] fixes #1127 : add process_bus_logging to stack and concatenate --- src/asammdf/gui/widgets/channel_group_info.py | 3 ++- src/asammdf/gui/widgets/file.py | 2 +- src/asammdf/mdf.py | 21 +++++++++++++++++-- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/src/asammdf/gui/widgets/channel_group_info.py b/src/asammdf/gui/widgets/channel_group_info.py index 90961ff8a..547865c37 100644 --- a/src/asammdf/gui/widgets/channel_group_info.py +++ b/src/asammdf/gui/widgets/channel_group_info.py @@ -79,7 +79,8 @@ def _display(self, position): record_count = record_end - record_offset data = b"".join( - e[0] for e in self.mdf._load_data(self.group, record_offset=record_offset, record_count=record_count) + fragment.data + for fragment in self.mdf._load_data(self.group, record_offset=record_offset, record_count=record_count) ) data = pd.Series(list(np.frombuffer(data, dtype=f"({self.record_size},)u1"))) diff --git a/src/asammdf/gui/widgets/file.py b/src/asammdf/gui/widgets/file.py index 8abce53ee..3cb0db7c0 100644 --- a/src/asammdf/gui/widgets/file.py +++ b/src/asammdf/gui/widgets/file.py @@ -1268,7 +1268,7 @@ def load_channel_list(self, event=None, file_name=None, manually=False, show_pro self.functions.update(info.get("functions", {})) self.global_variables = f'{self.global_variables}\n{info.get("global_variables", "")}' - self.global_variables = '\n'.join([line for line in self.global_variables.splitlines() if line]) + self.global_variables = "\n".join([line for line in self.global_variables.splitlines() if line]) if channels: iterator = QtWidgets.QTreeWidgetItemIterator(self.channels_tree) diff --git a/src/asammdf/mdf.py b/src/asammdf/mdf.py index 2f8cae7a7..1ce75b35f 100644 --- a/src/asammdf/mdf.py +++ b/src/asammdf/mdf.py @@ -203,6 +203,11 @@ class MDF: .. 
versionadded:: 7.0.0 + process_bus_logging (\*\*kwargs) : bool + controls if the bus processing of MDF v4 files is done when the file is loaded. Default True + + .. versionadded:: 8.0.0 + Examples -------- >>> mdf = MDF(version='3.30') # new MDF object with version 3.30 @@ -2245,6 +2250,11 @@ def concatenate( use_display_names (False) : bool + process_bus_logging (True) : bool + controls if the bus processing of MDF v4 files is done when the file is loaded. Default True + + .. versionadded:: 8.1.0 + Examples -------- >>> conc = MDF.concatenate( @@ -2646,7 +2656,8 @@ def concatenate( first_mdf.close() try: - merged._process_bus_logging() + if kwargs.get("process_bus_logging", True): + merged._process_bus_logging() except: pass @@ -2680,6 +2691,11 @@ def stack( use_display_names (False) : bool + process_bus_logging (True) : bool + controls if the bus processing of MDF v4 files is done when the file is loaded. Default True + + .. versionadded:: 8.1.0 + Examples -------- >>> stacked = MDF.stack( @@ -2834,7 +2850,8 @@ def stack( return TERMINATED try: - stacked._process_bus_logging() + if kwargs.get("process_bus_logging", True): + stacked._process_bus_logging() except: pass From c49dacdca7e461367ac0c650f23c1841942380ac Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 14 Jan 2025 14:20:55 +0200 Subject: [PATCH 58/60] default values for Fragment arguments --- src/asammdf/blocks/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/asammdf/blocks/utils.py b/src/asammdf/blocks/utils.py index 52543f00b..e9b2b3390 100644 --- a/src/asammdf/blocks/utils.py +++ b/src/asammdf/blocks/utils.py @@ -1518,9 +1518,9 @@ class Fragment: def __init__( self, data, - record_offset, - record_count, - invalidation_data, + record_offset=-1, + record_count=-1, + invalidation_data=None, is_record=True, ) -> None: self.data = data From 6b284f893add15ff264791b359f6704788876b1e Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Tue, 14 Jan 2025 16:26:35 +0200 
Subject: [PATCH 59/60] handle LIN scale ranges --- pyproject.toml | 2 +- src/asammdf/blocks/bus_logging_utils.py | 20 ++++++++++++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c6b9c0a14..73ab06339 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -103,7 +103,7 @@ skip = ["pp*", "*_ppc64le", "*-musllinux*", "*_s390x"] [tool.ruff] target-version = "py39" -extend-exclude = ["./src/asammdf/gui/ui"] +extend-exclude = ["./src/asammdf/gui/ui", "./ext"] force-exclude = true [tool.ruff.lint] diff --git a/src/asammdf/blocks/bus_logging_utils.py b/src/asammdf/blocks/bus_logging_utils.py index 7cbb311b9..f422e753a 100644 --- a/src/asammdf/blocks/bus_logging_utils.py +++ b/src/asammdf/blocks/bus_logging_utils.py @@ -441,8 +441,24 @@ def get_conversion(signal: Signal) -> v4b.ChannelConversion | None: a, b = float(signal.factor), float(signal.offset) - if signal.values: - conv = {} + conv = {} + + scale_ranges = getattr(signal, "scale_ranges", None) + if scale_ranges: + for i, scale_info in enumerate(scale_ranges): + conv[f"upper_{i}"] = scale_info["max"] + conv[f"lower_{i}"] = scale_info["min"] + conv[f"text_{i}"] = from_dict({"a": scale_info["factor"], "b": scale_info["offset"]}) + + for i, (val, text) in enumerate(signal.values.items(), len(scale_ranges)): + conv[f"upper_{i}"] = val + conv[f"lower_{i}"] = val + conv[f"text_{i}"] = text + + conv["default"] = from_dict({"a": a, "b": b}) + + elif signal.values: + for i, (val, text) in enumerate(signal.values.items()): conv[f"upper_{i}"] = val conv[f"lower_{i}"] = val From d254e6c474979e41746ad24867693e7d5e831871 Mon Sep 17 00:00:00 2001 From: danielhrisca Date: Wed, 15 Jan 2025 08:38:00 +0200 Subject: [PATCH 60/60] improve LDF channel unit in case of scale ranges --- src/asammdf/blocks/bus_logging_utils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/asammdf/blocks/bus_logging_utils.py b/src/asammdf/blocks/bus_logging_utils.py index 
f422e753a..fdecca615 100644 --- a/src/asammdf/blocks/bus_logging_utils.py +++ b/src/asammdf/blocks/bus_logging_utils.py @@ -396,10 +396,16 @@ def extract_mux( sig_name = sig.name try: + scale_ranges = getattr(sig, "scale_ranges", None) + if scale_ranges: + unit = scale_ranges[0]["unit"] or "" + else: + unit = sig.unit or "" + signals[sig_name] = { "name": sig_name, "comment": sig.comment or "", - "unit": sig.unit or "", + "unit": unit, "samples": samples if raw else apply_conversion(samples, sig, ignore_value2text_conversion), "conversion": get_conversion(sig) if raw else None, "t": t_,