
Commit a88f286

Merge branch 'main' into deprecate_cuda_stream_ref_header

2 parents ee2803a + 5a3e381

76 files changed (+769, -891 lines)

cudax/examples/stdexec_stream.cu

Lines changed: 32 additions & 32 deletions
@@ -17,48 +17,54 @@
 #include <cuda_runtime_api.h>
 
 namespace cudax = cuda::experimental;
-namespace task = cudax::execution;
+namespace ex = cudax::execution;
 
 // This example demonstrates how to use the experimental CUDA implementation of
 // C++26's std::execution async tasking framework.
 
-struct say_hello
+int main()
 {
-  __device__ int operator()() const
+  try
   {
-    printf("Hello from lambda on device!\n");
-    return value;
-  }
+    auto tctx = ex::thread_context{};
+    auto sctx = ex::stream_context{cuda::device_ref{0}};
+    auto gpu = sctx.get_scheduler();
 
-  int value;
-};
+    const auto bulk_shape = 10;
+    const auto bulk_fn = [] __device__(const int index, int i) noexcept {
+      const int tid = blockIdx.x * blockDim.x + threadIdx.x;
+      if (tid < bulk_shape)
+      {
+        printf("Hello from bulk task on device! index = %d, i = %d\n", index, i);
+      }
+    };
 
-__host__ void run()
-{
-  /*
-  try
-  {
-    task::thread_context tctx;
-    task::stream_context sctx{cuda::device_ref{0}};
-    auto sch = sctx.get_scheduler();
+    auto start =
+      // begin work on the GPU:
+      ex::schedule(gpu)
 
-    auto start = //
-      task::schedule(sch) // begin work on the GPU
-      | task::then(say_hello{42}) // enqueue a function object on the GPU
-      | task::then([] __device__(int i) noexcept -> int { // enqueue a lambda on the GPU
-          printf("Hello again from lambda on device! i = %d\n", i);
-          return i + 1;
+      // execute a device lambda on the GPU:
+      | ex::then([] __device__() noexcept -> int {
+          printf("Hello from lambda on device!\n");
+          return 42;
         })
-      | task::continues_on(tctx.get_scheduler()) // continue work on the CPU
-      | task::then([] __host__ __device__(int i) noexcept -> int { // run a lambda on the CPU
+
+      // do some parallel work on the GPU:
+      | ex::bulk(ex::par, bulk_shape, bulk_fn) //
+
+      // transfer execution back to the CPU:
+      | ex::continues_on(tctx.get_scheduler())
+
+      // execute a host/device lambda on the CPU:
+      | ex::then([] __host__ __device__(int i) noexcept -> int {
          NV_IF_TARGET(NV_IS_HOST,
                       (printf("Hello from lambda on host! i = %d\n", i);),
                       (printf("OOPS! still on the device! i = %d\n", i);))
-          return i;
+          return i + 1;
        });
 
     // run the task, wait for it to finish, and get the result
-    auto [i] = task::sync_wait(std::move(start)).value();
+    auto [i] = ex::sync_wait(std::move(start)).value();
     printf("All done on the host! result = %d\n", i);
   }
   catch (cuda::cuda_error const& e)

@@ -73,10 +79,4 @@ __host__ void run()
   {
     std::printf("Unknown exception\n");
   }
-  */
-}
-
-int main()
-{
-  run();
 }
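For readability, here is the updated example reassembled from the added lines of the two hunks above. The include block and the remaining catch handlers sit outside the changed region and are unchanged, so they are elided; everything shown comes directly from the diff.

namespace cudax = cuda::experimental;
namespace ex = cudax::execution;

int main()
{
  try
  {
    auto tctx = ex::thread_context{};
    auto sctx = ex::stream_context{cuda::device_ref{0}};
    auto gpu = sctx.get_scheduler();

    const auto bulk_shape = 10;
    const auto bulk_fn = [] __device__(const int index, int i) noexcept {
      const int tid = blockIdx.x * blockDim.x + threadIdx.x;
      if (tid < bulk_shape)
      {
        printf("Hello from bulk task on device! index = %d, i = %d\n", index, i);
      }
    };

    auto start =
      // begin work on the GPU:
      ex::schedule(gpu)
      // execute a device lambda on the GPU:
      | ex::then([] __device__() noexcept -> int {
          printf("Hello from lambda on device!\n");
          return 42;
        })
      // do some parallel work on the GPU:
      | ex::bulk(ex::par, bulk_shape, bulk_fn)
      // transfer execution back to the CPU:
      | ex::continues_on(tctx.get_scheduler())
      // execute a host/device lambda on the CPU:
      | ex::then([] __host__ __device__(int i) noexcept -> int {
          NV_IF_TARGET(NV_IS_HOST,
                       (printf("Hello from lambda on host! i = %d\n", i);),
                       (printf("OOPS! still on the device! i = %d\n", i);))
          return i + 1;
        });

    // run the task, wait for it to finish, and get the result
    auto [i] = ex::sync_wait(std::move(start)).value();
    printf("All done on the host! result = %d\n", i);
  }
  catch (cuda::cuda_error const& e)
  {
    // ... error reporting as in the unchanged remainder of the file ...
  }
}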

cudax/include/cuda/experimental/__container/async_buffer.cuh

Lines changed: 18 additions & 16 deletions
@@ -27,7 +27,6 @@
 
 #include <cuda/__memory_resource/get_memory_resource.h>
 #include <cuda/__memory_resource/properties.h>
-#include <cuda/__memory_resource/resource_ref.h>
 #include <cuda/__stream/get_stream.h>
 #include <cuda/std/__execution/env.h>
 #include <cuda/std/__iterator/concepts.h>

@@ -50,7 +49,6 @@
 #include <cuda/experimental/__execution/policy.cuh>
 #include <cuda/experimental/__launch/host_launch.cuh>
 #include <cuda/experimental/__memory_resource/any_resource.cuh>
-#include <cuda/experimental/__memory_resource/properties.cuh>
 #include <cuda/experimental/__utility/ensure_current_device.cuh>
 
 #include <cuda/std/__cccl/prologue.h>

@@ -96,7 +94,7 @@ public:
   using const_reverse_iterator = ::cuda::std::reverse_iterator<const_iterator>;
   using size_type = ::cuda::std::size_t;
   using difference_type = ::cuda::std::ptrdiff_t;
-  using properties_list = ::cuda::experimental::properties_list<_Properties...>;
+  using properties_list = ::cuda::mr::properties_list<_Properties...>;
 
   using __buffer_t = ::cuda::experimental::uninitialized_async_buffer<_Tp, _Properties...>;
   using __resource_t = ::cuda::experimental::any_resource<_Properties...>;

@@ -533,7 +531,7 @@ public:
 
   //! @brief Causes the buffer to be treated as a span when passed to cudax::launch.
   //! @pre The buffer must have the cuda::mr::device_accessible property.
-  template <class _DeviceAccessible = device_accessible>
+  template <class _DeviceAccessible = ::cuda::mr::device_accessible>
   [[nodiscard]] _CCCL_HIDE_FROM_ABI friend auto
   transform_device_argument(::cuda::stream_ref, async_buffer& __self) noexcept
     _CCCL_TRAILING_REQUIRES(::cuda::std::span<_Tp>)(::cuda::std::__is_included_in_v<_DeviceAccessible, _Properties...>)

@@ -544,7 +542,7 @@ public:
 
   //! @brief Causes the buffer to be treated as a span when passed to cudax::launch
   //! @pre The buffer must have the cuda::mr::device_accessible property.
-  template <class _DeviceAccessible = device_accessible>
+  template <class _DeviceAccessible = ::cuda::mr::device_accessible>
   [[nodiscard]] _CCCL_HIDE_FROM_ABI friend auto
   transform_device_argument(::cuda::stream_ref, const async_buffer& __self) noexcept _CCCL_TRAILING_REQUIRES(
     ::cuda::std::span<const _Tp>)(::cuda::std::__is_included_in_v<_DeviceAccessible, _Properties...>)

@@ -634,7 +632,7 @@ async_buffer<_Tp, _TargetProperties...> make_async_buffer(
 }
 
 _CCCL_TEMPLATE(class _Tp, class _Resource, class... _SourceProperties, class _Env = ::cuda::std::execution::env<>)
-_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND __has_default_queries<_Resource>)
+_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND ::cuda::mr::__has_default_queries<_Resource>)
 auto make_async_buffer(
   stream_ref __stream, _Resource&& __mr, const async_buffer<_Tp, _SourceProperties...>& __source, const _Env& __env = {})
 {

@@ -656,8 +654,8 @@ make_async_buffer(stream_ref __stream, any_resource<_Properties...> __mr, const
 }
 
 _CCCL_TEMPLATE(class _Tp, class _Resource, class _Env = ::cuda::std::execution::env<>)
-_CCCL_REQUIRES(
-  ::cuda::mr::resource<_Resource> _CCCL_AND __has_default_queries<_Resource> _CCCL_AND __buffer_compatible_env<_Env>)
+_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND ::cuda::mr::__has_default_queries<_Resource> _CCCL_AND
+                 __buffer_compatible_env<_Env>)
 auto make_async_buffer(stream_ref __stream, _Resource&& __mr, const _Env& __env = {})
 {
   using __buffer_type = __buffer_type_for_props<_Tp, typename ::cuda::std::decay_t<_Resource>::default_queries>;

@@ -669,7 +667,11 @@ _CCCL_BEGIN_NAMESPACE_ARCH_DEPENDENT
 // Size and value make function
 template <class _Tp, class... _Properties, class _Env = ::cuda::std::execution::env<>>
 async_buffer<_Tp, _Properties...> make_async_buffer(
-  stream_ref __stream, any_resource<_Properties...> __mr, size_t __size, const _Tp& __value, const _Env& __env = {})
+  stream_ref __stream,
+  any_resource<_Properties...> __mr,
+  size_t __size,
+  const _Tp& __value,
+  [[maybe_unused]] const _Env& __env = {})
 {
   auto __res = async_buffer<_Tp, _Properties...>{__stream, __mr, __size, no_init};
   __fill_n<_Tp, !::cuda::mr::__is_device_accessible<_Properties...>>(

@@ -678,7 +680,7 @@ async_buffer<_Tp, _Properties...> make_async_buffer(
 }
 
 _CCCL_TEMPLATE(class _Tp, class _Resource, class _Env = ::cuda::std::execution::env<>)
-_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND __has_default_queries<_Resource>)
+_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND ::cuda::mr::__has_default_queries<_Resource>)
 auto make_async_buffer(
   stream_ref __stream, _Resource&& __mr, size_t __size, const _Tp& __value, [[maybe_unused]] const _Env& __env = {})
 {

@@ -705,7 +707,7 @@ async_buffer<_Tp, _Properties...> make_async_buffer(
 }
 
 _CCCL_TEMPLATE(class _Tp, class _Resource, class _Env = ::cuda::std::execution::env<>)
-_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND __has_default_queries<_Resource>)
+_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND ::cuda::mr::__has_default_queries<_Resource>)
 auto make_async_buffer(
   stream_ref __stream, _Resource&& __mr, size_t __size, ::cuda::experimental::no_init_t, const _Env& __env = {})
 {

@@ -723,8 +725,8 @@ async_buffer<_Tp, _Properties...> make_async_buffer(
 }
 
 _CCCL_TEMPLATE(class _Tp, class _Resource, class _Iter, class _Env = ::cuda::std::execution::env<>)
-_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND
-                 __has_default_queries<_Resource> _CCCL_AND ::cuda::std::__has_forward_traversal<_Iter>)
+_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND ::cuda::mr::__has_default_queries<_Resource>
+                 _CCCL_AND ::cuda::std::__has_forward_traversal<_Iter>)
 auto make_async_buffer(stream_ref __stream, _Resource&& __mr, _Iter __first, _Iter __last, const _Env& __env = {})
 {
   using __buffer_type = __buffer_type_for_props<_Tp, typename ::cuda::std::decay_t<_Resource>::default_queries>;

@@ -743,7 +745,7 @@ async_buffer<_Tp, _Properties...> make_async_buffer(
 }
 
 _CCCL_TEMPLATE(class _Tp, class _Resource, class _Env = ::cuda::std::execution::env<>)
-_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND __has_default_queries<_Resource>)
+_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND ::cuda::mr::__has_default_queries<_Resource>)
 auto make_async_buffer(
   stream_ref __stream, _Resource&& __mr, ::cuda::std::initializer_list<_Tp> __ilist, const _Env& __env = {})
 {

@@ -761,8 +763,8 @@ make_async_buffer(stream_ref __stream, any_resource<_Properties...> __mr, _Range
 }
 
 _CCCL_TEMPLATE(class _Tp, class _Resource, class _Range, class _Env = ::cuda::std::execution::env<>)
-_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND
-                 __has_default_queries<_Resource> _CCCL_AND ::cuda::std::ranges::forward_range<_Range>)
+_CCCL_REQUIRES(::cuda::mr::resource<_Resource> _CCCL_AND ::cuda::mr::__has_default_queries<_Resource>
+                 _CCCL_AND ::cuda::std::ranges::forward_range<_Range>)
 auto make_async_buffer(stream_ref __stream, _Resource&& __mr, _Range&& __range, const _Env& __env = {})
 {
   using __buffer_type = __buffer_type_for_props<_Tp, typename ::cuda::std::decay_t<_Resource>::default_queries>;
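Most hunks in this file make the same mechanical change: property names such as device_accessible and the properties_list / __has_default_queries helpers are now spelled with their home namespace ::cuda::mr::, since the cuda/experimental properties header that previously re-exported them was dropped. The constraint that keeps recurring, __is_included_in_v<device_accessible, _Properties...>, is a compile-time membership test over the buffer's property pack. Below is a minimal standalone analogue of that mechanism; the names mr::device_accessible, mr::host_accessible, is_included_in_v, and buffer are illustrative stand-ins, not the cudax implementation.

#include <type_traits>

namespace mr
{
// illustrative property tags, standing in for ::cuda::mr::device_accessible and friends
struct device_accessible
{};
struct host_accessible
{};
} // namespace mr

// true when Tag appears in Properties... (fold over std::is_same_v)
template <class Tag, class... Properties>
inline constexpr bool is_included_in_v = (std::is_same_v<Tag, Properties> || ...);

// a buffer-like type that only exposes a "device view" when its property pack
// contains the device_accessible tag, mirroring the transform_device_argument constraint
template <class... Properties>
struct buffer
{
  template <class Tag = mr::device_accessible,
            std::enable_if_t<is_included_in_v<Tag, Properties...>, int> = 0>
  void as_device_span() const
  {}
};

int main()
{
  buffer<mr::device_accessible, mr::host_accessible> ok;
  ok.as_device_span(); // compiles: device_accessible is in the pack

  buffer<mr::host_accessible> host_only;
  // host_only.as_device_span(); // would not compile: the tag is not in the pack
  (void) host_only;
  return 0;
}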

cudax/include/cuda/experimental/__container/uninitialized_async_buffer.cuh

Lines changed: 3 additions & 4 deletions
@@ -33,7 +33,6 @@
 #include <cuda/std/span>
 
 #include <cuda/experimental/__memory_resource/any_resource.cuh>
-#include <cuda/experimental/__memory_resource/properties.cuh>
 
 #include <cuda/std/__cccl/prologue.h>
 

@@ -119,7 +118,7 @@ private:
   [[nodiscard]] _CCCL_HIDE_FROM_ABI friend auto
   transform_device_argument(::cuda::stream_ref, uninitialized_async_buffer& __self) noexcept
     _CCCL_TRAILING_REQUIRES(::cuda::std::span<_Tp>)(
-      ::cuda::std::same_as<_Tp, _Tp2>&& ::cuda::std::__is_included_in_v<device_accessible, _Properties...>)
+      ::cuda::std::same_as<_Tp, _Tp2>&& ::cuda::std::__is_included_in_v<::cuda::mr::device_accessible, _Properties...>)
   {
     // TODO add auto synchronization
     return {__self.__get_data(), __self.size()};

@@ -131,7 +130,7 @@ private:
   [[nodiscard]] _CCCL_HIDE_FROM_ABI friend auto
   transform_device_argument(::cuda::stream_ref, const uninitialized_async_buffer& __self) noexcept
     _CCCL_TRAILING_REQUIRES(::cuda::std::span<const _Tp>)(
-      ::cuda::std::same_as<_Tp, _Tp2>&& ::cuda::std::__is_included_in_v<device_accessible, _Properties...>)
+      ::cuda::std::same_as<_Tp, _Tp2>&& ::cuda::std::__is_included_in_v<::cuda::mr::device_accessible, _Properties...>)
   {
     // TODO add auto synchronization
     return {__self.__get_data(), __self.size()};

@@ -390,7 +389,7 @@ public:
 };
 
 template <class _Tp>
-using uninitialized_async_device_buffer = uninitialized_async_buffer<_Tp, device_accessible>;
+using uninitialized_async_device_buffer = uninitialized_async_buffer<_Tp, ::cuda::mr::device_accessible>;
 
 } // namespace cuda::experimental

cudax/include/cuda/experimental/__container/uninitialized_buffer.cuh

Lines changed: 3 additions & 4 deletions
@@ -31,7 +31,6 @@
 #include <cuda/std/span>
 
 #include <cuda/experimental/__memory_resource/any_resource.cuh>
-#include <cuda/experimental/__memory_resource/properties.cuh>
 
 #include <cuda/std/__cccl/prologue.h>
 

@@ -108,7 +107,7 @@ private:
   [[nodiscard]] _CCCL_HIDE_FROM_ABI friend auto
   transform_device_argument(::cuda::stream_ref, uninitialized_buffer& __self) noexcept
     _CCCL_TRAILING_REQUIRES(::cuda::std::span<_Tp>)(
-      ::cuda::std::same_as<_Tp, _Tp2>&& ::cuda::std::__is_included_in_v<device_accessible, _Properties...>)
+      ::cuda::std::same_as<_Tp, _Tp2>&& ::cuda::std::__is_included_in_v<::cuda::mr::device_accessible, _Properties...>)
   {
     return {__self.__get_data(), __self.size()};
   }

@@ -119,7 +118,7 @@ private:
   [[nodiscard]] _CCCL_HIDE_FROM_ABI friend auto
   transform_device_argument(::cuda::stream_ref, const uninitialized_buffer& __self) noexcept
     _CCCL_TRAILING_REQUIRES(::cuda::std::span<const _Tp>)(
-      ::cuda::std::same_as<_Tp, _Tp2>&& ::cuda::std::__is_included_in_v<device_accessible, _Properties...>)
+      ::cuda::std::same_as<_Tp, _Tp2>&& ::cuda::std::__is_included_in_v<::cuda::mr::device_accessible, _Properties...>)
   {
     return {__self.__get_data(), __self.size()};
   }

@@ -289,7 +288,7 @@ public:
 };
 
 template <class _Tp>
-using uninitialized_device_buffer = uninitialized_buffer<_Tp, device_accessible>;
+using uninitialized_device_buffer = uninitialized_buffer<_Tp, ::cuda::mr::device_accessible>;
 
 } // namespace cuda::experimental

cudax/include/cuda/experimental/__execution/apply_sender.cuh

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ public:
   //! @throws Any exception thrown by the underlying domain's `apply_sender`.
   _CCCL_EXEC_CHECK_DISABLE
   template <class _Domain, class _Tag, class _Sndr, class... _Args>
-  _CCCL_NODEBUG_API constexpr auto operator()(_Domain, _Tag, _Sndr&& __sndr, _Args&&... __args) const
+  _CCCL_API constexpr auto operator()(_Domain, _Tag, _Sndr&& __sndr, _Args&&... __args) const
     noexcept(noexcept(__apply_domain_t<_Domain, _Tag, _Sndr, _Args...>{}.apply_sender(
       _Tag{}, static_cast<_Sndr&&>(__sndr), static_cast<_Args&&>(__args)...)))
     -> __apply_sender_result_t<__apply_domain_t<_Domain, _Tag, _Sndr, _Args...>, _Tag, _Sndr, _Args...>

cudax/include/cuda/experimental/__execution/bulk.cuh

Lines changed: 17 additions & 8 deletions
@@ -68,13 +68,22 @@ struct _CCCL_TYPE_VISIBILITY_DEFAULT __state_t
 template <class _Sndr, class _Shape>
 struct _CCCL_TYPE_VISIBILITY_DEFAULT __attrs_t
 {
-  [[nodiscard]] _CCCL_HOST_API constexpr auto query(get_launch_config_t) const noexcept
+  [[nodiscard]] _CCCL_HOST_API static constexpr auto __get_launch_config(_Shape __shape) noexcept
   {
     constexpr int __block_threads = 256;
-    const int __grid_blocks = ::cuda::ceil_div(static_cast<int>(__shape_), __block_threads);
+    const int __grid_blocks = ::cuda::ceil_div(static_cast<int>(__shape), __block_threads);
     return experimental::make_config(block_dims<__block_threads>(), grid_dims(__grid_blocks));
   }
 
+  using __launch_config_t = decltype(__get_launch_config(_Shape()));
+
+  [[nodiscard]] _CCCL_API constexpr auto query(get_launch_config_t) const noexcept -> __launch_config_t
+  {
+    NV_IF_TARGET(NV_IS_HOST,
+                 (return __get_launch_config(__shape_);),
+                 (_CCCL_ASSERT(false, "cannot get a launch configuration from device"); ::cuda::std::terminate();))
+  }
+
   _CCCL_EXEC_CHECK_DISABLE
   _CCCL_TEMPLATE(class _Query, class... _Args)
   _CCCL_REQUIRES(__forwarding_query<_Query> _CCCL_AND __queryable_with<env_of_t<_Sndr>, _Query, _Args...>)

@@ -152,7 +161,7 @@ struct _CCCL_TYPE_VISIBILITY_DEFAULT __bulk_t
       execution::set_stopped(static_cast<_Rcvr&&>(__state_->__rcvr_));
     }
 
-    [[nodiscard]] _CCCL_NODEBUG_API constexpr auto get_env() const noexcept -> __fwd_env_t<env_of_t<_Rcvr>>
+    [[nodiscard]] _CCCL_API constexpr auto get_env() const noexcept -> __fwd_env_t<env_of_t<_Rcvr>>
     {
       return __fwd_env(execution::get_env(__state_->__rcvr_));
     }

@@ -188,7 +197,7 @@ struct _CCCL_TYPE_VISIBILITY_DEFAULT __bulk_t
   struct _CCCL_TYPE_VISIBILITY_DEFAULT __closure_base_t
   {
     template <class _Sndr>
-    [[nodiscard]] _CCCL_NODEBUG_API friend constexpr auto operator|(_Sndr&& __sndr, __closure_base_t __self)
+    [[nodiscard]] _CCCL_API friend constexpr auto operator|(_Sndr&& __sndr, __closure_base_t __self)
     {
       static_assert(__is_sender<_Sndr>);
 

@@ -202,7 +211,7 @@ struct _CCCL_TYPE_VISIBILITY_DEFAULT __bulk_t
         {{}, static_cast<__closure_base_t&&>(__self), static_cast<_Sndr&&>(__sndr)}};
     }
 
-    _CCCL_NO_UNIQUE_ADDRESS _Policy __policy_;
+    /*_CCCL_NO_UNIQUE_ADDRESS*/ _Policy __policy_;
     _Shape __shape_;
     _Fn __fn_;
   };

@@ -250,7 +259,7 @@ struct _CCCL_TYPE_VISIBILITY_DEFAULT __bulk_t
       return {__state_.__shape_, __sndr_};
     }
 
-    _CCCL_NO_UNIQUE_ADDRESS _BulkTag __tag_;
+    /*_CCCL_NO_UNIQUE_ADDRESS*/ _BulkTag __tag_;
     __closure_base_t<_Policy, _Shape, _Fn> __state_;
     _Sndr __sndr_;
   };

@@ -267,7 +276,7 @@ struct _CCCL_TYPE_VISIBILITY_DEFAULT __bulk_t
   // This function call operator creates a sender adaptor closure object that can appear
   // on the right-hand side of a pipe operator, like: sndr | bulk(par, shape, fn).
   template <class _Policy, class _Shape, class _Fn>
-  [[nodiscard]] _CCCL_NODEBUG_API auto operator()(_Policy __policy, _Shape __shape, _Fn __fn) const
+  [[nodiscard]] _CCCL_API auto operator()(_Policy __policy, _Shape __shape, _Fn __fn) const
   {
     static_assert(::cuda::std::integral<_Shape>);
     static_assert(::cuda::std::is_execution_policy_v<_Policy>);

@@ -389,7 +398,7 @@ struct _CCCL_TYPE_VISIBILITY_DEFAULT bulk_t : __bulk_t<bulk_t>
   {
     _CCCL_EXEC_CHECK_DISABLE
     template <class... _Ts>
-    _CCCL_NODEBUG_API void operator()(_Shape __begin, _Shape __end, _Ts&&... __values) noexcept(
+    _CCCL_API void operator()(_Shape __begin, _Shape __end, _Ts&&... __values) noexcept(
      __nothrow_callable<_Fn&, _Shape, decltype(__values)&...>)
     {
       for (; __begin != __end; ++__begin)
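The first hunk above keeps the launch-shape arithmetic unchanged, a fixed 256-thread block and ceil_div(shape, 256) grid blocks, but moves it into a static __get_launch_config helper so the query can name its result type and can assert when invoked from device code. Below is a standalone sketch of that arithmetic; ceil_div, launch_config, and get_launch_config are simplified stand-ins for ::cuda::ceil_div and the configuration built by experimental::make_config, not the cudax types.

#include <cstdio>

// simplified stand-in for ::cuda::ceil_div
constexpr int ceil_div(int a, int b)
{
  return (a + b - 1) / b;
}

// simplified stand-in for the configuration built by experimental::make_config
struct launch_config
{
  int block_threads;
  int grid_blocks;
};

// same shape computation as __get_launch_config in the hunk above
constexpr launch_config get_launch_config(int shape)
{
  constexpr int block_threads = 256;
  return {block_threads, ceil_div(shape, block_threads)};
}

int main()
{
  // shape = 10 (as in the updated stdexec_stream.cu example) fits in one block
  static_assert(get_launch_config(10).grid_blocks == 1, "");
  // shape = 1000 needs ceil(1000 / 256) = 4 blocks
  static_assert(get_launch_config(1000).grid_blocks == 4, "");

  const auto cfg = get_launch_config(1000);
  std::printf("%d blocks x %d threads\n", cfg.grid_blocks, cfg.block_threads);
  return 0;
}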

cudax/include/cuda/experimental/__execution/completion_behavior.cuh

Lines changed: 1 addition & 1 deletion
@@ -139,7 +139,7 @@ struct get_completion_behavior_t
     return __attrs.query(*this, __env);
   }
 
-  [[nodiscard]] _CCCL_NODEBUG_API static constexpr auto query(forwarding_query_t) noexcept -> bool
+  [[nodiscard]] _CCCL_API static constexpr auto query(forwarding_query_t) noexcept -> bool
   {
     return true;
   }
