Skip to content

Commit

Permalink
Use synchronous copy to framework array in the absence of a stream (#5364)
Browse files Browse the repository at this point in the history

Signed-off-by: Kamil Tokarski <[email protected]>
  • Loading branch information
stiepan committed Mar 11, 2024
1 parent 3b35149 commit e2ae685
Show file tree
Hide file tree
Showing 3 changed files with 8 additions and 4 deletions.
3 changes: 2 additions & 1 deletion dali/python/nvidia/dali/plugin/mxnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,12 +72,13 @@ def feed_ndarray(dali_tensor, arr, cuda_stream=None):
ptr = ctypes.c_void_p()
mx.base._LIB.MXNDArrayGetData(arr.handle, ctypes.byref(ptr))

non_blocking = cuda_stream is not None
cuda_stream = types._raw_cuda_stream(cuda_stream)

# Copy data from DALI tensor to ptr
if isinstance(dali_tensor, (TensorGPU, TensorListGPU)):
stream = None if cuda_stream is None else ctypes.c_void_p(cuda_stream)
dali_tensor.copy_to_external(ptr, stream, non_blocking=True)
dali_tensor.copy_to_external(ptr, stream, non_blocking=non_blocking)
else:
dali_tensor.copy_to_external(ptr)

Expand Down
3 changes: 2 additions & 1 deletion dali/python/nvidia/dali/plugin/paddle.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,12 +73,13 @@ def feed_ndarray(dali_tensor, ptr, cuda_stream=None):
(if not provided, an internal user stream will be selected)
"""

non_blocking = cuda_stream is not None
cuda_stream = types._raw_cuda_stream(cuda_stream)

c_type_pointer = ctypes.c_void_p(ptr)
if isinstance(dali_tensor, (TensorGPU, TensorListGPU)):
stream = None if cuda_stream is None else ctypes.c_void_p(cuda_stream)
dali_tensor.copy_to_external(c_type_pointer, stream, non_blocking=True)
dali_tensor.copy_to_external(c_type_pointer, stream, non_blocking=non_blocking)
else:
dali_tensor.copy_to_external(c_type_pointer)
return ptr
Expand Down
6 changes: 4 additions & 2 deletions dali/python/nvidia/dali/plugin/pytorch/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (c) 2017-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright (c) 2017-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -85,13 +85,15 @@ def feed_ndarray(
), "Shapes do not match: DALI tensor has size {0}, but PyTorch Tensor has size {1}".format(
dali_tensor.shape(), list(arr.size())
)

non_blocking = cuda_stream is not None
cuda_stream = types._raw_cuda_stream(cuda_stream)

# turn raw int to a c void pointer
c_type_pointer = ctypes.c_void_p(arr.data_ptr())
if isinstance(dali_tensor, (TensorGPU, TensorListGPU)):
stream = None if cuda_stream is None else ctypes.c_void_p(cuda_stream)
dali_tensor.copy_to_external(c_type_pointer, stream, non_blocking=True)
dali_tensor.copy_to_external(c_type_pointer, stream, non_blocking=non_blocking)
else:
dali_tensor.copy_to_external(c_type_pointer)
return arr
Expand Down

0 comments on commit e2ae685

Please sign in to comment.