diff --git a/cpp/examples/basic/CMakeLists.txt b/cpp/examples/basic/CMakeLists.txt
index be24e895c..374c087b3 100644
--- a/cpp/examples/basic/CMakeLists.txt
+++ b/cpp/examples/basic/CMakeLists.txt
@@ -7,7 +7,7 @@ cmake_minimum_required(VERSION 3.30.4)
 
 include(../set_cuda_architecture.cmake)
 
-# initialize cuda architecture
+# initialize CUDA architectures
 rapids_cuda_init_architectures(basic_example)
 
 project(
diff --git a/cpp/include/doxygen_groups.h b/cpp/include/doxygen_groups.h
index 79ce708b2..213cc1d99 100644
--- a/cpp/include/doxygen_groups.h
+++ b/cpp/include/doxygen_groups.h
@@ -48,7 +48,6 @@
  * @defgroup cuda_streams CUDA Streams
  * @defgroup data_containers Data Containers
  * @defgroup errors Errors
- * @defgroup logging Logging
  * @defgroup thrust_integrations Thrust Integrations
  * @defgroup utilities Utilities
  */
diff --git a/cpp/include/rmm/cuda_stream.hpp b/cpp/include/rmm/cuda_stream.hpp
index 4bb92d058..da620f5c5 100644
--- a/cpp/include/rmm/cuda_stream.hpp
+++ b/cpp/include/rmm/cuda_stream.hpp
@@ -56,7 +56,7 @@ class cuda_stream {
   cuda_stream& operator=(cuda_stream&) = delete;
 
   /**
-   * @brief Construct a new cuda stream object
+   * @brief Construct a new CUDA stream object
    *
    * @param flags Stream creation flags.
    *
diff --git a/cpp/include/rmm/cuda_stream_pool.hpp b/cpp/include/rmm/cuda_stream_pool.hpp
index 659499ce6..a0fbe93ca 100644
--- a/cpp/include/rmm/cuda_stream_pool.hpp
+++ b/cpp/include/rmm/cuda_stream_pool.hpp
@@ -33,7 +33,7 @@ class cuda_stream_pool {
   static constexpr std::size_t default_size{16};  ///< Default stream pool size
 
   /**
-   * @brief Construct a new cuda stream pool object of the given non-zero size
+   * @brief Construct a new CUDA stream pool object of the given non-zero size
    *
    * @throws logic_error if `pool_size` is zero
    * @param pool_size The number of streams in the pool
diff --git a/cpp/include/rmm/detail/error.hpp b/cpp/include/rmm/detail/error.hpp
index dee6ba090..d1296a0d3 100644
--- a/cpp/include/rmm/detail/error.hpp
+++ b/cpp/include/rmm/detail/error.hpp
@@ -26,11 +26,11 @@
  * specified.
  *
  * Example usage:
- * ```
- * // throws rmm::logic_error
+ * ```cpp
+ * // Throws rmm::logic_error
  * RMM_EXPECTS(p != nullptr, "Unexpected null pointer");
  *
- * // throws std::runtime_error
+ * // Throws std::runtime_error
  * RMM_EXPECTS(p != nullptr, "Unexpected nullptr", std::runtime_error);
  * ```
  * @param ... This macro accepts either two or three arguments:
@@ -60,7 +60,7 @@
  * @brief Indicates that an erroneous code path has been taken.
  *
  * Example usage:
- * ```c++
+ * ```cpp
  * // Throws rmm::logic_error
  * RMM_FAIL("Unsupported code path");
  *
@@ -91,8 +91,7 @@
  * specified.
  *
  * Example:
- * ```c++
- *
+ * ```cpp
  * // Throws rmm::cuda_error if `cudaMalloc` fails
  * RMM_CUDA_TRY(cudaMalloc(&p, 100));
  *
@@ -176,14 +175,14 @@
  * equal to `cudaSuccess`.
  *
  *
- * Replaces usecases such as:
- * ```
+ * Replaces use cases such as:
+ * ```cpp
  * auto status = cudaRuntimeApi(...);
  * assert(status == cudaSuccess);
  * ```
  *
  * Example:
- * ```
+ * ```cpp
  * RMM_ASSERT_CUDA_SUCCESS(cudaRuntimeApi(...));
  * ```
  *
diff --git a/cpp/include/rmm/detail/nvtx/ranges.hpp b/cpp/include/rmm/detail/nvtx/ranges.hpp
index 6278bfa19..9ab0aadf5 100644
--- a/cpp/include/rmm/detail/nvtx/ranges.hpp
+++ b/cpp/include/rmm/detail/nvtx/ranges.hpp
@@ -22,7 +22,7 @@ struct librmm_domain {
  * Customizes an NVTX range with the given input.
  *
  * Example:
- * ```
+ * ```cpp
  * void some_function(){
  *    rmm::scoped_range rng{"custom_name"}; // Customizes range name
  *    ...
@@ -41,7 +41,7 @@ using scoped_range = ::nvtx3::scoped_range_in<librmm_domain>;
  * name the range.
  *
  * Example:
- * ```
+ * ```cpp
  * void some_function(){
  *    RMM_FUNC_RANGE();
  *    ...
diff --git a/cpp/include/rmm/device_buffer.hpp b/cpp/include/rmm/device_buffer.hpp
index 369d2ad5e..aac0cec9f 100644
--- a/cpp/include/rmm/device_buffer.hpp
+++ b/cpp/include/rmm/device_buffer.hpp
@@ -34,28 +34,28 @@ namespace RMM_NAMESPACE {
  * behavior to read the contents of `data()` before first initializing it.
  *
  * Examples:
- * ```
- * //Allocates at least 100 bytes of device memory using the default memory
- * //resource and default stream.
+ * ```cpp
+ * // Allocates at least 100 bytes of device memory using the default memory
+ * // resource and default stream.
  * device_buffer buff(100);
  *
- * // allocates at least 100 bytes using the custom memory resource and
+ * // Allocates at least 100 bytes using the custom memory resource and
  * // specified stream
  * custom_memory_resource mr;
  * cuda_stream_view stream = cuda_stream_view{};
  * device_buffer custom_buff(100, stream, &mr);
  *
- * // deep copies `buff` into a new device buffer using the specified stream
+ * // Deep copies `buff` into a new device buffer using the specified stream
  * device_buffer buff_copy(buff, stream);
  *
- * // moves the memory in `from_buff` to `to_buff`. Deallocates previously allocated
+ * // Moves the memory in `from_buff` to `to_buff`. Deallocates previously allocated
  * // to_buff memory on `to_buff.stream()`.
  * device_buffer to_buff(std::move(from_buff));
  *
- * // deep copies `buff` into a new device buffer using the specified stream
+ * // Deep copies `buff` into a new device buffer using the specified stream
  * device_buffer buff_copy(buff, stream);
  *
- * // shallow copies `buff` into a new device_buffer, `buff` is now empty
+ * // Shallow copies `buff` into a new device_buffer, `buff` is now empty
  * device_buffer buff_move(std::move(buff));
  *
  * // Default construction. Buffer is empty
@@ -65,7 +65,7 @@ namespace RMM_NAMESPACE {
  * // deep copies any previous contents. Otherwise, simply updates the value of `size()` to the
  * // newly requested size without any allocations or copies. Uses the specified stream.
  * buff_default.resize(100, stream);
- *```
+ * ```
  */
 class device_buffer {
  public:
diff --git a/cpp/include/rmm/device_scalar.hpp b/cpp/include/rmm/device_scalar.hpp
index 632fc6f97..4645cbe2d 100644
--- a/cpp/include/rmm/device_scalar.hpp
+++ b/cpp/include/rmm/device_scalar.hpp
@@ -168,7 +168,7 @@ class device_scalar {
    * referenced by `v` should not be destroyed or modified until `stream` has been
    * synchronized. Otherwise, behavior is undefined.
    *
-   * @note: This function incurs a host to device memcpy or device memset and should be used
+   * @note This function incurs a host to device memcpy or device memset and should be used
    * carefully.
    *
    * Example:
@@ -209,7 +209,7 @@ class device_scalar {
    *
    * This function does not synchronize `stream` before returning.
    *
-   * @note: This function incurs a device memset and should be used carefully.
+   * @note This function incurs a device memset and should be used carefully.
    *
    * @param stream CUDA stream on which to perform the copy
    */
diff --git a/cpp/include/rmm/device_uvector.hpp b/cpp/include/rmm/device_uvector.hpp
index f84456fd6..ea748ed17 100644
--- a/cpp/include/rmm/device_uvector.hpp
+++ b/cpp/include/rmm/device_uvector.hpp
@@ -70,14 +70,14 @@ class device_uvector {
                 "device_uvector only supports types that are trivially copyable.");
 
  public:
-  using value_type = T;  ///< T; stored value type
+  using value_type = T;  ///< Stored value type
   using size_type = std::size_t;  ///< The type used for the size of the vector
-  using reference = value_type&;  ///< value_type&; reference type returned by operator[](size_type)
-  using const_reference = value_type const&;  ///< value_type const&; constant reference type
-                                              ///< returned by operator[](size_type) const
-  using pointer = value_type*;  ///< The type of the pointer returned by data()
-  using const_pointer = value_type const*;  ///< The type of the pointer returned by data() const
-  using iterator = pointer;  ///< The type of the iterator returned by begin()
+  using reference = value_type&;  ///< Reference type returned by operator[](size_type)
+  using const_reference =
+    value_type const&;  ///< Constant reference type returned by operator[](size_type) const
+  using pointer = value_type*;  ///< The type of the pointer returned by data()
+  using const_pointer = value_type const*;  ///< The type of the pointer returned by data() const
+  using iterator = pointer;  ///< The type of the iterator returned by begin()
   using const_iterator = const_pointer;  ///< The type of the const iterator returned by cbegin()
   using reverse_iterator =
     thrust::reverse_iterator<iterator>;  ///< The type of the iterator returned by rbegin()
diff --git a/cpp/include/rmm/mr/device/callback_memory_resource.hpp b/cpp/include/rmm/mr/device/callback_memory_resource.hpp
index 877e211cb..99b827e94 100644
--- a/cpp/include/rmm/mr/device/callback_memory_resource.hpp
+++ b/cpp/include/rmm/mr/device/callback_memory_resource.hpp
@@ -23,7 +23,7 @@ namespace mr {
  * @brief Callback function type used by callback memory resource for allocation.
  *
  * The signature of the callback function is:
- * `void* allocate_callback_t(std::size_t bytes, cuda_stream_view stream, void* arg);
+ * `void* allocate_callback_t(std::size_t bytes, cuda_stream_view stream, void* arg);`
  *
  * * Returns a pointer to an allocation of at least `bytes` usable immediately on
  *   `stream`. The stream-ordered behavior requirements are identical to
@@ -40,7 +40,7 @@ using allocate_callback_t = std::function<void* (std::size_t, cuda_stream_view, void*)>;
+ * using example = example_resource;
  * using wrapped_example = rmm::mr::owning_wrapper;
  * auto cuda_mr = std::make_shared();
  *
diff --git a/cpp/include/rmm/mr/device/polymorphic_allocator.hpp b/cpp/include/rmm/mr/device/polymorphic_allocator.hpp
index b7a4b7a22..65a748d87 100644
--- a/cpp/include/rmm/mr/device/polymorphic_allocator.hpp
+++ b/cpp/include/rmm/mr/device/polymorphic_allocator.hpp
@@ -175,7 +175,7 @@ class stream_allocator_adaptor {
   /**
    * @brief Construct a `stream_allocator_adaptor` using `a` as the underlying allocator.
    *
-   * @note: The `stream` must not be destroyed before the `stream_allocator_adaptor`, otherwise
+   * @note The `stream` must not be destroyed before the `stream_allocator_adaptor`, otherwise
    * behavior is undefined.
    *
    * @param allocator The stream ordered allocator to use as the underlying allocator
diff --git a/cpp/include/rmm/mr/device/thrust_allocator_adaptor.hpp b/cpp/include/rmm/mr/device/thrust_allocator_adaptor.hpp
index ec05f8f36..dcdf9c387 100644
--- a/cpp/include/rmm/mr/device/thrust_allocator_adaptor.hpp
+++ b/cpp/include/rmm/mr/device/thrust_allocator_adaptor.hpp
@@ -31,7 +31,7 @@ namespace mr {
  * allocate objects of a specific type `T`, but can be freely rebound to other
  * types.
  *
- * The allocator records the current cuda device and may only be used with a backing
+ * The allocator records the current CUDA device and may only be used with a backing
  * `device_async_resource_ref` valid for the same device.
  *
  * @tparam T The type of the objects that will be allocated by this allocator
diff --git a/cpp/include/rmm/mr/device/tracking_resource_adaptor.hpp b/cpp/include/rmm/mr/device/tracking_resource_adaptor.hpp
index 0f77ac173..f0037dfc9 100644
--- a/cpp/include/rmm/mr/device/tracking_resource_adaptor.hpp
+++ b/cpp/include/rmm/mr/device/tracking_resource_adaptor.hpp
@@ -38,8 +38,7 @@ namespace mr {
  * the project is linked with `-rdynamic`. This can be accomplished with
  * `add_link_options(-rdynamic)` in cmake.
  *
- * @tparam Upstream Type of the upstream resource used for
- * allocation/deallocation.
+ * @tparam Upstream Type of the upstream resource used for allocation/deallocation.
  */
 template <typename Upstream>
 class tracking_resource_adaptor final : public device_memory_resource {
diff --git a/cpp/include/rmm/mr/pinned_host_memory_resource.hpp b/cpp/include/rmm/mr/pinned_host_memory_resource.hpp
index 9084e116f..7f05a34aa 100644
--- a/cpp/include/rmm/mr/pinned_host_memory_resource.hpp
+++ b/cpp/include/rmm/mr/pinned_host_memory_resource.hpp
@@ -22,7 +22,7 @@
 namespace RMM_NAMESPACE {
 namespace mr {
 /**
- * @addtogroup memory_resources
+ * @addtogroup device_memory_resources
  * @{
  * @file
  */
diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt
index 9e6fdb493..52d0ac26f 100644
--- a/cpp/tests/CMakeLists.txt
+++ b/cpp/tests/CMakeLists.txt
@@ -190,7 +190,7 @@ ConfigureTest(PINNED_MR_TEST mr/host/pinned_mr_tests.cpp)
 # pinned pool mr tests
 ConfigureTest(PINNED_POOL_MR_TEST mr/host/pinned_pool_mr_tests.cpp)
 
-# cuda stream tests
+# CUDA stream tests
 ConfigureTest(CUDA_STREAM_TEST cuda_stream_tests.cpp cuda_stream_pool_tests.cpp)
 
 # device buffer tests
diff --git a/cpp/tests/container_multidevice_tests.cu b/cpp/tests/container_multidevice_tests.cu
index 4e183c549..ef41ea733 100644
--- a/cpp/tests/container_multidevice_tests.cu
+++ b/cpp/tests/container_multidevice_tests.cu
@@ -28,7 +28,7 @@ TYPED_TEST_SUITE(ContainerMultiDeviceTest, containers);
 
 TYPED_TEST(ContainerMultiDeviceTest, CreateDestroyDifferentActiveDevice)
 {
-  // Get the number of cuda devices
+  // Get the number of CUDA devices
   int num_devices = rmm::get_num_cuda_devices();
 
   // only run on multidevice systems
@@ -55,7 +55,7 @@ TYPED_TEST(ContainerMultiDeviceTest, CreateDestroyDifferentActiveDevice)
 
 TYPED_TEST(ContainerMultiDeviceTest, CreateMoveDestroyDifferentActiveDevice)
 {
-  // Get the number of cuda devices
+  // Get the number of CUDA devices
   int num_devices = rmm::get_num_cuda_devices();
 
   // only run on multidevice systems
@@ -95,7 +95,7 @@ TYPED_TEST(ContainerMultiDeviceTest, CreateMoveDestroyDifferentActiveDevice)
 
 TYPED_TEST(ContainerMultiDeviceTest, ResizeDifferentActiveDevice)
 {
-  // Get the number of cuda devices
+  // Get the number of CUDA devices
   int num_devices = rmm::get_num_cuda_devices();
 
   // only run on multidevice systems
@@ -118,7 +118,7 @@ TYPED_TEST(ContainerMultiDeviceTest, ResizeDifferentActiveDevice)
 
 TYPED_TEST(ContainerMultiDeviceTest, ShrinkDifferentActiveDevice)
 {
-  // Get the number of cuda devices
+  // Get the number of CUDA devices
   int num_devices = rmm::get_num_cuda_devices();
 
   // only run on multidevice systems
diff --git a/cpp/tests/logger_tests.cpp b/cpp/tests/logger_tests.cpp
index 1f8c9cc24..b756c01cf 100644
--- a/cpp/tests/logger_tests.cpp
+++ b/cpp/tests/logger_tests.cpp
@@ -84,7 +84,7 @@ class raii_temp_directory {
  *
  * Events in the log file are expected to occur in the same order as in `expected_events`.
  *
- * @note: This function accounts for the fact that `device_memory_resource` automatically pads
+ * @note This function accounts for the fact that `device_memory_resource` automatically pads
  * allocations to a multiple of 8 bytes by rounding up the expected allocation sizes to a multiple
  * of 8.
  *
diff --git a/cpp/tests/mr/device/pool_mr_tests.cpp b/cpp/tests/mr/device/pool_mr_tests.cpp
index a17e5675d..1b67c945b 100644
--- a/cpp/tests/mr/device/pool_mr_tests.cpp
+++ b/cpp/tests/mr/device/pool_mr_tests.cpp
@@ -143,7 +143,7 @@ TEST(PoolTest, MultidevicePool)
 {
   using MemoryResource = rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>;
 
-  // Get the number of cuda devices
+  // Get the number of CUDA devices
   int num_devices = rmm::get_num_cuda_devices();
 
   // only run on multidevice systems