2 changes: 1 addition & 1 deletion cpp/examples/basic/CMakeLists.txt
@@ -7,7 +7,7 @@ cmake_minimum_required(VERSION 3.30.4)

include(../set_cuda_architecture.cmake)

-# initialize cuda architecture
+# initialize CUDA architectures
rapids_cuda_init_architectures(basic_example)

project(
1 change: 0 additions & 1 deletion cpp/include/doxygen_groups.h
@@ -48,7 +48,6 @@
* @defgroup cuda_streams CUDA Streams
* @defgroup data_containers Data Containers
* @defgroup errors Errors
-* @defgroup logging Logging
* @defgroup thrust_integrations Thrust Integrations
* @defgroup utilities Utilities
*/
2 changes: 1 addition & 1 deletion cpp/include/rmm/cuda_stream.hpp
@@ -56,7 +56,7 @@ class cuda_stream {
cuda_stream& operator=(cuda_stream&) = delete;

/**
-* @brief Construct a new cuda stream object
+* @brief Construct a new CUDA stream object
*
* @param flags Stream creation flags.
*
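For context, a minimal sketch of how `rmm::cuda_stream` is used (the function and comments are illustrative, not from the diff above):

```cpp
#include <rmm/cuda_stream.hpp>

void example()
{
  rmm::cuda_stream stream;  // Creates and owns a new CUDA stream; destroyed with the object
  // ... enqueue work using stream.value() (a cudaStream_t) or stream.view() ...
  stream.synchronize();     // Block until all work enqueued on the stream has completed
}
```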
2 changes: 1 addition & 1 deletion cpp/include/rmm/cuda_stream_pool.hpp
@@ -33,7 +33,7 @@ class cuda_stream_pool {
static constexpr std::size_t default_size{16}; ///< Default stream pool size

/**
-* @brief Construct a new cuda stream pool object of the given non-zero size
+* @brief Construct a new CUDA stream pool object of the given non-zero size
*
* @throws logic_error if `pool_size` is zero
* @param pool_size The number of streams in the pool
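A short usage sketch for the pool (the size 8 is arbitrary):

```cpp
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/cuda_stream_view.hpp>

void example()
{
  rmm::cuda_stream_pool pool{8};  // Throws rmm::logic_error if the size is zero, per the doc above
  rmm::cuda_stream_view stream = pool.get_stream();  // A view of one of the pooled streams
  // ... launch work on `stream`; repeated calls cycle through the pool ...
}
```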
17 changes: 8 additions & 9 deletions cpp/include/rmm/detail/error.hpp
@@ -26,11 +26,11 @@
* specified.
*
* Example usage:
-* ```
-* // throws rmm::logic_error
+* ```cpp
+* // Throws rmm::logic_error
* RMM_EXPECTS(p != nullptr, "Unexpected null pointer");
*
-* // throws std::runtime_error
+* // Throws std::runtime_error
* RMM_EXPECTS(p != nullptr, "Unexpected nullptr", std::runtime_error);
* ```
* @param ... This macro accepts either two or three arguments:
@@ -60,7 +60,7 @@
* @brief Indicates that an erroneous code path has been taken.
*
* Example usage:
-* ```c++
+* ```cpp
* // Throws rmm::logic_error
* RMM_FAIL("Unsupported code path");
*
@@ -91,8 +91,7 @@
* specified.
*
* Example:
-* ```c++
-*
+* ```cpp
* // Throws rmm::cuda_error if `cudaMalloc` fails
* RMM_CUDA_TRY(cudaMalloc(&p, 100));
*
@@ -176,14 +175,14 @@
* equal to `cudaSuccess`.
*
*
-* Replaces usecases such as:
-* ```
+* Replaces use cases such as:
+* ```cpp
* auto status = cudaRuntimeApi(...);
* assert(status == cudaSuccess);
* ```
*
* Example:
-* ```
+* ```cpp
* RMM_ASSERT_CUDA_SUCCESS(cudaRuntimeApi(...));
* ```
*
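Putting the three documented macros together, a sketch of typical usage (function names are hypothetical; note this is a detail header, so the macros are internal RMM API):

```cpp
#include <rmm/detail/error.hpp>

#include <cuda_runtime_api.h>

#include <cstddef>

void* checked_alloc(std::size_t bytes)
{
  RMM_EXPECTS(bytes > 0, "Allocation size must be non-zero");  // Throws rmm::logic_error

  void* ptr{nullptr};
  RMM_CUDA_TRY(cudaMalloc(&ptr, bytes));  // Throws rmm::cuda_error if cudaMalloc fails
  return ptr;
}

void checked_free(void* ptr)
{
  RMM_ASSERT_CUDA_SUCCESS(cudaFree(ptr));  // Debug-build assertion that the call succeeded
}
```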
4 changes: 2 additions & 2 deletions cpp/include/rmm/detail/nvtx/ranges.hpp
@@ -22,7 +22,7 @@ struct librmm_domain {
* Customizes an NVTX range with the given input.
*
* Example:
-* ```
+* ```cpp
* void some_function(){
* rmm::scoped_range rng{"custom_name"}; // Customizes range name
* ...
@@ -41,7 +41,7 @@ using scoped_range = ::nvtx3::scoped_range_in<librmm_domain>;
* name the range.
*
* Example:
-* ```
+* ```cpp
* void some_function(){
* RMM_FUNC_RANGE();
* ...
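The two documented patterns reduced to one sketch (the range label is hypothetical, and this is again a detail header):

```cpp
#include <rmm/detail/nvtx/ranges.hpp>

void load_data()
{
  RMM_FUNC_RANGE();  // NVTX range named after this function, open for the whole scope

  {
    rmm::scoped_range rng{"parse_phase"};  // Nested range with a custom name
    // ... this work appears under "parse_phase" in the profiler ...
  }
}
```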
18 changes: 9 additions & 9 deletions cpp/include/rmm/device_buffer.hpp
@@ -34,28 +34,28 @@ namespace RMM_NAMESPACE {
* behavior to read the contents of `data()` before first initializing it.
*
* Examples:
-* ```
-* //Allocates at least 100 bytes of device memory using the default memory
-* //resource and default stream.
+* ```cpp
+* // Allocates at least 100 bytes of device memory using the default memory
+* // resource and default stream.
* device_buffer buff(100);
*
-* // allocates at least 100 bytes using the custom memory resource and
+* // Allocates at least 100 bytes using the custom memory resource and
* // specified stream
* custom_memory_resource mr;
* cuda_stream_view stream = cuda_stream_view{};
* device_buffer custom_buff(100, stream, &mr);
*
-* // deep copies `buff` into a new device buffer using the specified stream
+* // Deep copies `buff` into a new device buffer using the specified stream
* device_buffer buff_copy(buff, stream);
*
-* // moves the memory in `from_buff` to `to_buff`. Deallocates previously allocated
+* // Moves the memory in `from_buff` to `to_buff`. Deallocates previously allocated
* // to_buff memory on `to_buff.stream()`.
* device_buffer to_buff(std::move(from_buff));
*
-* // deep copies `buff` into a new device buffer using the specified stream
+* // Deep copies `buff` into a new device buffer using the specified stream
* device_buffer buff_copy(buff, stream);
*
-* // shallow copies `buff` into a new device_buffer, `buff` is now empty
+* // Shallow copies `buff` into a new device_buffer, `buff` is now empty
* device_buffer buff_move(std::move(buff));
*
* // Default construction. Buffer is empty
@@ -65,7 +65,7 @@ namespace RMM_NAMESPACE {
* // deep copies any previous contents. Otherwise, simply updates the value of `size()` to the
* // newly requested size without any allocations or copies. Uses the specified stream.
* buff_default.resize(100, stream);
-*```
+* ```
*/
class device_buffer {
public:
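Complementing the comment block above, a runnable sketch that also exercises the copy-from-host constructor and `shrink_to_fit()` (sizes arbitrary):

```cpp
#include <rmm/cuda_stream.hpp>
#include <rmm/device_buffer.hpp>

#include <vector>

void example()
{
  rmm::cuda_stream stream;
  std::vector<char> host(100, 0);

  // Allocates 100 bytes on `stream` and copies the host data into it.
  rmm::device_buffer buff(host.data(), host.size(), stream.view());

  buff.resize(50, stream.view());     // Shrinks size(); capacity() stays at 100
  buff.shrink_to_fit(stream.view());  // Reallocates so capacity() == size()
}
```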
4 changes: 2 additions & 2 deletions cpp/include/rmm/device_scalar.hpp
@@ -168,7 +168,7 @@ class device_scalar {
* referenced by `v` should not be destroyed or modified until `stream` has been
* synchronized. Otherwise, behavior is undefined.
*
-* @note: This function incurs a host to device memcpy or device memset and should be used
+* @note This function incurs a host to device memcpy or device memset and should be used
* carefully.
*
* Example:
@@ -209,7 +209,7 @@
*
* This function does not synchronize `stream` before returning.
*
-* @note: This function incurs a device memset and should be used carefully.
+* @note This function incurs a device memset and should be used carefully.
*
* @param stream CUDA stream on which to perform the copy
*/
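The notes being fixed refer to calls like these (a sketch; variable names are hypothetical and method names are per recent RMM releases):

```cpp
#include <rmm/cuda_stream.hpp>
#include <rmm/device_scalar.hpp>

int example()
{
  rmm::cuda_stream stream;
  rmm::device_scalar<int> scalar{stream.view()};  // Uninitialized scalar in device memory

  int const host_val{42};  // Must stay alive until `stream` is synchronized (see doc above)
  scalar.set_value_async(host_val, stream.view());  // Host-to-device memcpy (first @note)

  return scalar.value(stream.view());  // Device-to-host copy; synchronizes `stream`
}
```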
14 changes: 7 additions & 7 deletions cpp/include/rmm/device_uvector.hpp
@@ -70,14 +70,14 @@ class device_uvector {
"device_uvector only supports types that are trivially copyable.");

public:
-using value_type = T; ///< T; stored value type
+using value_type = T; ///< Stored value type
using size_type = std::size_t; ///< The type used for the size of the vector
-using reference = value_type&; ///< value_type&; reference type returned by operator[](size_type)
-using const_reference = value_type const&; ///< value_type const&; constant reference type
-///< returned by operator[](size_type) const
-using pointer = value_type*; ///< The type of the pointer returned by data()
-using const_pointer = value_type const*; ///< The type of the pointer returned by data() const
-using iterator = pointer; ///< The type of the iterator returned by begin()
+using reference = value_type&; ///< Reference type returned by operator[](size_type)
+using const_reference =
+  value_type const&; ///< Constant reference type returned by operator[](size_type) const
+using pointer = value_type*; ///< The type of the pointer returned by data()
+using const_pointer = value_type const*; ///< The type of the pointer returned by data() const
+using iterator = pointer; ///< The type of the iterator returned by begin()
using const_iterator = const_pointer; ///< The type of the const iterator returned by cbegin()
using reverse_iterator =
thrust::reverse_iterator<iterator>; ///< The type of the iterator returned by rbegin()
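A short sketch using the typedefs documented above (element values arbitrary):

```cpp
#include <rmm/cuda_stream.hpp>
#include <rmm/device_uvector.hpp>

int example()
{
  rmm::cuda_stream stream;
  rmm::device_uvector<int> vec(100, stream.view());  // Uninitialized storage for 100 ints

  vec.set_element(0, 42, stream.view());  // Copies one value host-to-device
  // vec.begin() has the `iterator` type above (a plain int*), usable in device algorithms
  return vec.element(0, stream.view());   // Copies one value back; synchronizes `stream`
}
```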
4 changes: 2 additions & 2 deletions cpp/include/rmm/mr/device/callback_memory_resource.hpp
@@ -23,7 +23,7 @@ namespace mr {
* @brief Callback function type used by callback memory resource for allocation.
*
* The signature of the callback function is:
-* `void* allocate_callback_t(std::size_t bytes, cuda_stream_view stream, void* arg);
+* `void* allocate_callback_t(std::size_t bytes, cuda_stream_view stream, void* arg);`
*
* * Returns a pointer to an allocation of at least `bytes` usable immediately on
* `stream`. The stream-ordered behavior requirements are identical to
@@ -40,7 +40,7 @@ using allocate_callback_t = std::function<void*(std::size_t, cuda_stream_view, void*)>;
* @brief Callback function type used by callback_memory_resource for deallocation.
*
* The signature of the callback function is:
-* `void deallocate_callback_t(void* ptr, std::size_t bytes, cuda_stream_view stream, void* arg);
+* `void deallocate_callback_t(void* ptr, std::size_t bytes, cuda_stream_view stream, void* arg);`
*
* * Deallocates memory pointed to by `ptr`. `bytes` specifies the size of the allocation
* in bytes, and must equal the value of `bytes` that was passed to the allocate callback
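To make the two callback signatures concrete, a sketch that forwards both to an upstream resource through the opaque `arg` pointers (the constructor is assumed to take the two callbacks followed by their respective arguments):

```cpp
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/callback_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>

#include <cstddef>

void* allocate_cb(std::size_t bytes, rmm::cuda_stream_view stream, void* arg)
{
  auto* upstream = static_cast<rmm::mr::cuda_memory_resource*>(arg);
  return upstream->allocate(bytes, stream);
}

void deallocate_cb(void* ptr, std::size_t bytes, rmm::cuda_stream_view stream, void* arg)
{
  auto* upstream = static_cast<rmm::mr::cuda_memory_resource*>(arg);
  upstream->deallocate(ptr, bytes, stream);
}

void example()
{
  rmm::mr::cuda_memory_resource upstream;
  rmm::mr::callback_memory_resource mr{allocate_cb, deallocate_cb, &upstream, &upstream};

  void* p = mr.allocate(256, rmm::cuda_stream_view{});
  mr.deallocate(p, 256, rmm::cuda_stream_view{});
}
```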
2 changes: 1 addition & 1 deletion cpp/include/rmm/mr/device/owning_wrapper.hpp
@@ -112,7 +112,7 @@ class owning_wrapper : public device_memory_resource {
* };
*
* using cuda = rmm::mr::cuda_memory_resource;
-* using example = example_resource<cuda,cuda>;
+* using example = example_resource<cuda, cuda>;
* using wrapped_example = rmm::mr::owning_wrapper<example, cuda, cuda>;
* auto cuda_mr = std::make_shared<cuda>();
*
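The `make_owning_wrapper` factory is usually simpler than spelling out the wrapper type as in the excerpt; a sketch (the pool's initial-size argument is an assumption about its constructor):

```cpp
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>

#include <memory>

void example()
{
  auto cuda = std::make_shared<rmm::mr::cuda_memory_resource>();

  // The wrapper shares ownership of `cuda`, so the upstream outlives the pool.
  auto pool = rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(
    cuda, 1u << 22 /* hypothetical initial pool size */);

  void* p = pool->allocate(4096);
  pool->deallocate(p, 4096);
}
```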
2 changes: 1 addition & 1 deletion cpp/include/rmm/mr/device/polymorphic_allocator.hpp
@@ -175,7 +175,7 @@ class stream_allocator_adaptor {
/**
* @brief Construct a `stream_allocator_adaptor` using `a` as the underlying allocator.
*
-* @note: The `stream` must not be destroyed before the `stream_allocator_adaptor`, otherwise
+* @note The `stream` must not be destroyed before the `stream_allocator_adaptor`, otherwise
* behavior is undefined.
*
* @param allocator The stream ordered allocator to use as the underlying allocator
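A sketch of the adaptor with the documented constructor (that `polymorphic_allocator`'s default constructor uses the current device resource is an assumption about recent RMM versions):

```cpp
#include <rmm/cuda_stream.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>

void example()
{
  rmm::cuda_stream stream;  // Per the @note: must outlive the adaptor below
  rmm::mr::polymorphic_allocator<int> alloc;  // Allocates from the current device resource

  rmm::mr::stream_allocator_adaptor<rmm::mr::polymorphic_allocator<int>> adapted{
    alloc, stream.view()};

  int* ptr = adapted.allocate(100);  // Stream-ordered allocation on `stream`
  adapted.deallocate(ptr, 100);
}
```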
2 changes: 1 addition & 1 deletion cpp/include/rmm/mr/device/thrust_allocator_adaptor.hpp
@@ -31,7 +31,7 @@ namespace mr {
* allocate objects of a specific type `T`, but can be freely rebound to other
* types.
*
-* The allocator records the current cuda device and may only be used with a backing
+* The allocator records the current CUDA device and may only be used with a backing
* `device_async_resource_ref` valid for the same device.
*
* @tparam T The type of the objects that will be allocated by this allocator
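A sketch of the adaptor backing a Thrust container (assumes Thrust's allocator-aware constructors, available in recent Thrust versions, and that a stream-only constructor uses the current device resource):

```cpp
#include <rmm/cuda_stream.hpp>
#include <rmm/mr/device/thrust_allocator_adaptor.hpp>

#include <thrust/device_vector.h>

void example()
{
  rmm::cuda_stream stream;

  // Records the current CUDA device, per the revised doc; use it only on that device.
  rmm::mr::thrust_allocator<int> alloc(stream.view());
  thrust::device_vector<int, rmm::mr::thrust_allocator<int>> vec(100, alloc);
}
```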
3 changes: 1 addition & 2 deletions cpp/include/rmm/mr/device/tracking_resource_adaptor.hpp
@@ -38,8 +38,7 @@ namespace mr {
* the project is linked with `-rdynamic`. This can be accomplished with
* `add_link_options(-rdynamic)` in cmake.
*
-* @tparam Upstream Type of the upstream resource used for
-* allocation/deallocation.
+* @tparam Upstream Type of the upstream resource used for allocation/deallocation.
*/
template <typename Upstream>
class tracking_resource_adaptor final : public device_memory_resource {
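A sketch of wrapping an upstream resource and querying outstanding allocations (the raw-pointer constructor and the `get_allocated_bytes()` accessor match the releases I know; treat both as assumptions):

```cpp
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/tracking_resource_adaptor.hpp>

#include <iostream>

void example()
{
  rmm::mr::cuda_memory_resource upstream;
  rmm::mr::tracking_resource_adaptor<rmm::mr::cuda_memory_resource> tracker{&upstream};

  void* p = tracker.allocate(1024);
  std::cout << tracker.get_allocated_bytes() << " bytes outstanding\n";
  tracker.deallocate(p, 1024);
}
```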
2 changes: 1 addition & 1 deletion cpp/include/rmm/mr/pinned_host_memory_resource.hpp
@@ -22,7 +22,7 @@ namespace RMM_NAMESPACE {
namespace mr {

/**
-* @addtogroup memory_resources
+* @addtogroup device_memory_resources
* @{
* @file
*/
2 changes: 1 addition & 1 deletion cpp/tests/CMakeLists.txt
@@ -190,7 +190,7 @@ ConfigureTest(PINNED_MR_TEST mr/host/pinned_mr_tests.cpp)
# pinned pool mr tests
ConfigureTest(PINNED_POOL_MR_TEST mr/host/pinned_pool_mr_tests.cpp)

-# cuda stream tests
+# CUDA stream tests
ConfigureTest(CUDA_STREAM_TEST cuda_stream_tests.cpp cuda_stream_pool_tests.cpp)

# device buffer tests
8 changes: 4 additions & 4 deletions cpp/tests/container_multidevice_tests.cu
@@ -28,7 +28,7 @@ TYPED_TEST_SUITE(ContainerMultiDeviceTest, containers);

TYPED_TEST(ContainerMultiDeviceTest, CreateDestroyDifferentActiveDevice)
{
-// Get the number of cuda devices
+// Get the number of CUDA devices
int num_devices = rmm::get_num_cuda_devices();

// only run on multidevice systems
@@ -55,7 +55,7 @@ TYPED_TEST(ContainerMultiDeviceTest, CreateDestroyDifferentActiveDevice)

TYPED_TEST(ContainerMultiDeviceTest, CreateMoveDestroyDifferentActiveDevice)
{
-// Get the number of cuda devices
+// Get the number of CUDA devices
int num_devices = rmm::get_num_cuda_devices();

// only run on multidevice systems
@@ -95,7 +95,7 @@ TYPED_TEST(ContainerMultiDeviceTest, CreateMoveDestroyDifferentActiveDevice)

TYPED_TEST(ContainerMultiDeviceTest, ResizeDifferentActiveDevice)
{
-// Get the number of cuda devices
+// Get the number of CUDA devices
int num_devices = rmm::get_num_cuda_devices();

// only run on multidevice systems
@@ -118,7 +118,7 @@ TYPED_TEST(ContainerMultiDeviceTest, ResizeDifferentActiveDevice)

TYPED_TEST(ContainerMultiDeviceTest, ShrinkDifferentActiveDevice)
{
-// Get the number of cuda devices
+// Get the number of CUDA devices
int num_devices = rmm::get_num_cuda_devices();

// only run on multidevice systems
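The recurring comment pair in these tests corresponds to a guard like this sketch (test name and message are hypothetical):

```cpp
#include <rmm/cuda_device.hpp>

#include <gtest/gtest.h>

TEST(MultiDeviceExample, SkipsOnSingleGpu)
{
  // Get the number of CUDA devices; only run on multidevice systems
  int num_devices = rmm::get_num_cuda_devices();
  if (num_devices < 2) { GTEST_SKIP() << "requires at least two CUDA devices"; }

  // ... create, move, resize, or destroy containers with a different device active ...
}
```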
2 changes: 1 addition & 1 deletion cpp/tests/logger_tests.cpp
@@ -84,7 +84,7 @@ class raii_temp_directory {
*
* Events in the log file are expected to occur in the same order as in `expected_events`.
*
-* @note: This function accounts for the fact that `device_memory_resource` automatically pads
+* @note This function accounts for the fact that `device_memory_resource` automatically pads
* allocations to a multiple of 8 bytes by rounding up the expected allocation sizes to a multiple
* of 8.
*
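The padding rule in that note is easy to state in code; a minimal sketch of the round-up applied to expected allocation sizes:

```cpp
#include <cstddef>

// Round a size up to the next multiple of 8, matching the padding described above.
constexpr std::size_t round_up_8(std::size_t bytes) { return (bytes + 7) & ~std::size_t{7}; }

static_assert(round_up_8(100) == 104);
static_assert(round_up_8(96) == 96);
```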
2 changes: 1 addition & 1 deletion cpp/tests/mr/device/pool_mr_tests.cpp
@@ -143,7 +143,7 @@ TEST(PoolTest, MultidevicePool)
{
using MemoryResource = rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>;

-// Get the number of cuda devices
+// Get the number of CUDA devices
int num_devices = rmm::get_num_cuda_devices();

// only run on multidevice systems