Skip to content

Commit

Permalink
onwards
Browse files Browse the repository at this point in the history
  • Loading branch information
danielhrisca committed Dec 19, 2024
1 parent d8c154c commit 6415e71
Show file tree
Hide file tree
Showing 2 changed files with 94 additions and 26 deletions.
89 changes: 78 additions & 11 deletions src/asammdf/blocks/cutils.c
Original file line number Diff line number Diff line change
Expand Up @@ -1944,16 +1944,27 @@ typedef struct InfoBlock {
} InfoBlock, *PtrInfoBlock;


/*
 * Per-signal extraction descriptor consumed by
 * get_channel_raw_bytes_complete_windows(): records where one channel's
 * bytes sit inside each record and where the extracted bytes accumulate.
 * One extra trailing entry is appended for the invalidation bytes when
 * invalidation_bytes is non-zero.
 */
typedef struct SignalInfo{
int64_t byte_offset;               /* byte offset of this signal inside one record */
int64_t byte_count;                /* number of bytes this signal occupies per record */
int32_t invalidation_bit_position; /* invalidation bit index; -1 for the invalidation-bytes entry */
uint8_t *data;                     /* start of the output buffer (backing store of obj) */
uint8_t *data_position;            /* current write position; initialized to data */
PyObject *obj;                     /* Python bytearray that owns the data buffer */
} SignalInfo, *PtrSignalInfo;


#if defined(_WIN32)

typedef struct ProcessesingBlock {
uint8_t stop;
Py_ssize_t out_size;
uint8_t * outptr;
uint8_t ** outptr;
int64_t cycles;
uint8_t * inptr;
PtrInfoBlock block_info;
Py_ssize_t byte_offset;
Py_ssize_t byte_count;
struct SignalInfo *signals;
Py_ssize_t signal_count;
Py_ssize_t record_size;
Py_ssize_t idx;
Py_ssize_t use_miniz;
Expand Down Expand Up @@ -2081,15 +2092,17 @@ void * get_channel_raw_bytes_complete_C_windows(void *lpParam )

static PyObject *get_channel_raw_bytes_complete_windows(PyObject *self, PyObject *args)
{
Py_ssize_t info_count, thread_count=11, use_miniz=0;
PyObject *data_blocks_info, *out = NULL, *item, *ref;
Py_ssize_t info_count, signal_count, thread_count=11, use_miniz=0;
PyObject *data_blocks_info, *signals, *out = NULL, *item, *ref;

char *outptr, *file_name, *deflate_lib_path=NULL;
char *read_pos = NULL, *write_pos = NULL;
Py_ssize_t position = 0, record_size = 0,
cycles, step = 0;
Py_ssize_t isize = 0, offset = 0,byte_count, byte_offset;
int is_list;
int64_t byte_offset, byte_count;
int32_t invalidation_bit_position;

PtrInfoBlock block_info;
InfoBlock info_block;
Expand All @@ -2099,8 +2112,10 @@ static PyObject *get_channel_raw_bytes_complete_windows(PyObject *self, PyObject
FILE *fptr;
uint8_t *buffer;
int result;
int is_list;

if (!PyArg_ParseTuple(args, "Osnnnns|nn", &data_blocks_info, &file_name, &cycles, &record_size, &byte_offset, &byte_count, &deflate_lib_path, &thread_count, &use_miniz))
if (!PyArg_ParseTuple(args, "OOsnnns|nn",
&data_blocks_info, &signals, &file_name, &cycles, &record_size, &invalidation_bytes, &deflate_lib_path, &thread_count, &use_miniz))
{
return NULL;
}
Expand All @@ -2114,6 +2129,62 @@ static PyObject *get_channel_raw_bytes_complete_windows(PyObject *self, PyObject
dwThreadIdArray = (DWORD *) malloc(sizeof(DWORD) * thread_count);
block_ready = (HANDLE *) malloc(sizeof(HANDLE) * thread_count);
bytes_ready = (HANDLE *) malloc(sizeof(HANDLE) * thread_count);

PtrSignalInfo signal_info;

is_list = PyList_Check(signals);
if (is_list) {
signal_count = PyList_Size(signals);
}
else {
signal_count = PyTuple_Size(signals);
}

if (invalidation_bytes) {
signal_info = (PtrSignalInfo) malloc(sizeof(SignalInfo) * (signal_count + 1));
}
else {
signal_info = (PtrSignalInfo) malloc(sizeof(SignalInfo) * signal_count);
}
for (int i=0; i<signal_count; i++) {
if (is_list) {
obj = PyList_GetItem(signals, i);
}
else {
obj = PyTuple_GetItem(signals, i);
}

if (PyList_Check(obj)) {
byte_offset = PyLong_AsLongLong(PyList_GetItem(obj, 0));
byte_count = PyLong_AsLongLong(PyList_GetItem(obj, 1));
invalidation_bit_position = PyLong_AsLong(PyList_GetItem(obj, 2));
}
else {
byte_offset = PyLong_AsLongLong(PyTuple_GetItem(obj, 0));
byte_count = PyLong_AsLongLong(PyTuple_GetItem(obj, 1));
invalidation_bit_position = PyLong_AsLong(PyTuple_GetItem(obj, 2));
}

obj = PyByteArray_FromStringAndSize(NULL, byte_count * cycles);

signal_info[i].byte_offset = byte_offset;
signal_info[i].byte_count = byte_count;
signal_info[i].invalidation_bit_position = invalidation_bit_position;
signal_info[i].data = (uint8_t *) PyByteArray_AsString(obj);
signal_info[i].data_position = signal_info[i].data;
signal_info[i].obj = obj;

}

if (invalidation_bytes) {
obj = PyByteArray_FromStringAndSize(NULL, invalidation_bytes * cycles);
signal_info[signal_count].byte_offset = record_size - invalidation_bytes;
signal_info[signal_count].byte_count = invalidation_bytes;
signal_info[signal_count].invalidation_bit_position = -1;
signal_info[signal_count].data = (uint8_t *) PyByteArray_AsString(obj);
signal_info[signal_count].data_position = signal_info[signal_count].data;
signal_info[signal_count].obj = obj;
}

is_list = PyList_Check(data_blocks_info);
if (is_list) {
Expand All @@ -2124,11 +2195,7 @@ static PyObject *get_channel_raw_bytes_complete_windows(PyObject *self, PyObject
}


if (!info_count)
{
out = PyBytes_FromStringAndSize(NULL, 0);
}
else
if (info_count)
{
if (info_count < thread_count) {
thread_count = info_count;
Expand Down
31 changes: 16 additions & 15 deletions src/asammdf/blocks/mdf_v4.py
Original file line number Diff line number Diff line change
Expand Up @@ -1887,6 +1887,9 @@ def _get_data_blocks_info(
else:
READ_CHUNK_SIZE = 32 * 1024 * 1024

if READ_CHUNK_SIZE > total_size:
READ_CHUNK_SIZE = total_size

if mapped:
if address:
id_string, block_len = COMMON_SHORT_uf(stream, address)
Expand All @@ -1895,6 +1898,9 @@ def _get_data_blocks_info(
if id_string == block_type:
size = block_len - 24
if size:
if size > total_size:
size = total_size

address = address + COMMON_SIZE

# split the DTBLOCK into chunks of up to 32MB
Expand All @@ -1913,18 +1919,14 @@ def _get_data_blocks_info(
)
address += READ_CHUNK_SIZE
else:
if total_size < size:
block_limit = total_size
else:
block_limit = None

yield DataBlockInfo(
address=address,
block_type=v4c.DT_BLOCK,
original_size=size,
compressed_size=size,
param=0,
block_limit=block_limit,
block_limit=None,
)
break

Expand All @@ -1943,10 +1945,12 @@ def _get_data_blocks_info(
param = 0
else:
block_type_ = v4c.DZ_BLOCK_TRANSPOSED

if total_size < original_size:
block_limit = total_size
else:
block_limit = None

total_size -= original_size
yield DataBlockInfo(
address=address + v4c.DZ_COMMON_SIZE,
Expand All @@ -1969,6 +1973,9 @@ def _get_data_blocks_info(
if id_string == block_type:
size = block_len - 24
if size:
if size > total_size:
size = total_size

addr += COMMON_SIZE

# split the DTBLOCK into chunks of up to 32MB
Expand All @@ -1987,10 +1994,6 @@ def _get_data_blocks_info(
)
addr += READ_CHUNK_SIZE
else:
if total_size < size:
block_limit = total_size
else:
block_limit = None

total_size -= size

Expand All @@ -2000,7 +2003,7 @@ def _get_data_blocks_info(
original_size=size,
compressed_size=size,
param=0,
block_limit=block_limit,
block_limit=None,
)
break

Expand Down Expand Up @@ -2178,6 +2181,8 @@ def _get_data_blocks_info(
if id_string == block_type:
size = block_len - 24
if size:
if size > total_size:
size = total_size
address = address + COMMON_SIZE

# split the DTBLOCK into chunks of up to 32MB
Expand All @@ -2196,18 +2201,14 @@ def _get_data_blocks_info(
)
address += READ_CHUNK_SIZE
else:
if total_size < size:
block_limit = total_size
else:
block_limit = None

yield DataBlockInfo(
address=address,
block_type=v4c.DT_BLOCK,
original_size=size,
compressed_size=size,
param=0,
block_limit=block_limit,
block_limit=None,
)
break

Expand Down

0 comments on commit 6415e71

Please sign in to comment.