diff --git a/kernels/optimized/CMakeLists.txt b/kernels/optimized/CMakeLists.txt
index f87e2c8d722..01a10f77846 100644
--- a/kernels/optimized/CMakeLists.txt
+++ b/kernels/optimized/CMakeLists.txt
@@ -21,7 +21,10 @@ if(NOT EXECUTORCH_ROOT)
   set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
 endif()

-set(_common_compile_options -Wno-deprecated-declarations)
+set(_common_compile_options
+    $<$<CXX_COMPILER_ID:MSVC>:/wd4996>
+    $<$<NOT:$<CXX_COMPILER_ID:MSVC>>:-Wno-deprecated-declarations>
+)

 # Note for apple platform we can rely on Accelerate framework Will come back to
 # this
diff --git a/kernels/optimized/cpu/op_bmm.cpp b/kernels/optimized/cpu/op_bmm.cpp
index 9cbd30cb6e1..171f14de399 100644
--- a/kernels/optimized/cpu/op_bmm.cpp
+++ b/kernels/optimized/cpu/op_bmm.cpp
@@ -150,7 +150,8 @@ Tensor& opt_bmm_out(
   ET_KERNEL_CHECK(
       ctx, check_bmm_out_args(self, mat2, out), InvalidArgument, out);

-  constexpr auto name = "bmm.out";
+  static constexpr auto name = "bmm.out";
+
   auto self_type = self.scalar_type();

   if (executorch::runtime::isComplexType(self_type)) {
diff --git a/kernels/portable/cpu/op_convolution_backward.cpp b/kernels/portable/cpu/op_convolution_backward.cpp
index 2535ed4eb6c..ffe9ed57b41 100644
--- a/kernels/portable/cpu/op_convolution_backward.cpp
+++ b/kernels/portable/cpu/op_convolution_backward.cpp
@@ -305,7 +305,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> convolution_backward_out(
         ret_val);
   }

-  constexpr auto name = "convolution_backward.out";
+  static constexpr auto name = "convolution_backward.out";

   ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
     conv2d_backward_impl<CTYPE>(
diff --git a/kernels/portable/cpu/op_gather.cpp b/kernels/portable/cpu/op_gather.cpp
index 02ea502ca63..a42256ac4fc 100644
--- a/kernels/portable/cpu/op_gather.cpp
+++ b/kernels/portable/cpu/op_gather.cpp
@@ -85,7 +85,7 @@ Tensor& gather_out(
       InvalidArgument,
       out);

-  constexpr auto name = "gather.out";
+  static constexpr auto name = "gather.out";

   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() {
     gather_helper<CTYPE>(in, index, out, dim);
diff --git a/kernels/portable/cpu/op_masked_scatter.cpp b/kernels/portable/cpu/op_masked_scatter.cpp
index 16cef033670..e91a8d0a08d 100644
--- a/kernels/portable/cpu/op_masked_scatter.cpp
+++ b/kernels/portable/cpu/op_masked_scatter.cpp
@@ -41,13 +41,13 @@ Tensor& masked_scatter_out(
       InvalidArgument,
       out);

-  constexpr auto op_name = "masked_scatter.out";
-
   int64_t idx = 0;
   int64_t src_numel = src.numel();
   bool src_numel_check = true;

-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE, [&]() {
+  static constexpr auto name = "masked_scatter.out";
+
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE, [&]() {
     const CTYPE* const src_data = src.const_data_ptr<CTYPE>();
     apply_binary_elementwise_fn(
         [src_data, &idx, &src_numel, &src_numel_check](
diff --git a/kernels/portable/cpu/op_max.cpp b/kernels/portable/cpu/op_max.cpp
index 467c8ccffd5..38ed50317d1 100644
--- a/kernels/portable/cpu/op_max.cpp
+++ b/kernels/portable/cpu/op_max.cpp
@@ -124,7 +124,7 @@ max_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {

   ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);

-  constexpr auto name = "max.unary_out";
+  static constexpr auto name = "max.unary_out";

   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
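The constexpr -> static constexpr change in op_bmm.cpp above repeats through most of the kernel files that follow. The likely motivation, hedged since the patch does not spell it out: `name` is referenced inside the lambdas that the ET_SWITCH_* macros generate, and MSVC is stricter than GCC/Clang about referencing a non-static constexpr local from a lambda that does not capture it. Giving the local static storage duration removes the capture question entirely. A minimal sketch, not ExecuTorch code:

    #include <cstdio>

    template <typename F>
    void dispatch(F f) { f(); }

    void demo() {
      // With a plain `constexpr auto name`, MSVC has been known to demand an
      // explicit capture below; a static local is always reachable from a
      // capture-less lambda body.
      static constexpr auto name = "bmm.out";
      dispatch([] { std::printf("%s\n", name); });
    }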
diff --git a/kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp b/kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp
index 2013d1272d9..99dc8a89293 100644
--- a/kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp
+++ b/kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp
@@ -169,7 +169,7 @@ Tensor& max_pool2d_with_indices_backward_out(
       InvalidArgument,
       grad_input);

-  constexpr auto name = "max_pool2d_with_indices_backward.grad_input";
+  static constexpr auto name = "max_pool2d_with_indices_backward.grad_input";

   ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
     max_pool_backward_impl<CTYPE>(grad_input, grad_output, indices);
diff --git a/kernels/portable/cpu/op_min.cpp b/kernels/portable/cpu/op_min.cpp
index 304321bb9f8..711774d4cce 100644
--- a/kernels/portable/cpu/op_min.cpp
+++ b/kernels/portable/cpu/op_min.cpp
@@ -124,7 +124,7 @@ min_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {

   ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);

-  constexpr auto name = "min.unary_out";
+  static constexpr auto name = "min.unary_out";

   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
diff --git a/kernels/portable/cpu/op_native_batch_norm.cpp b/kernels/portable/cpu/op_native_batch_norm.cpp
index aa6919924f1..b8905e0ef35 100644
--- a/kernels/portable/cpu/op_native_batch_norm.cpp
+++ b/kernels/portable/cpu/op_native_batch_norm.cpp
@@ -102,7 +102,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_training_out(
   size_t outer = getLeadingDims(in, C_dim);
   size_t inner = getTrailingDims(in, C_dim);

-  constexpr auto name = "native_batch_norm_legit_no_training.out";
+  static constexpr auto name = "native_batch_norm_legit_no_training.out";

   ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();
@@ -259,7 +259,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_stats_out(
       InvalidArgument,
       ret_val);

-  constexpr auto name = "_native_batch_norm_legit.no_stats_out";
+  static constexpr auto name = "_native_batch_norm_legit.no_stats_out";

   ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();
diff --git a/kernels/portable/cpu/op_native_group_norm.cpp b/kernels/portable/cpu/op_native_group_norm.cpp
index 7882204e57e..9e300dc7829 100644
--- a/kernels/portable/cpu/op_native_group_norm.cpp
+++ b/kernels/portable/cpu/op_native_group_norm.cpp
@@ -190,7 +190,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_group_norm_out(
       ret_val);
   }

-  constexpr auto name = "native_group_norm.out";
+  static constexpr auto name = "native_group_norm.out";

   ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
     group_norm<CTYPE>(
diff --git a/kernels/portable/cpu/op_pdist_forward.cpp b/kernels/portable/cpu/op_pdist_forward.cpp
index e412e43aa0c..f4093260ff6 100644
--- a/kernels/portable/cpu/op_pdist_forward.cpp
+++ b/kernels/portable/cpu/op_pdist_forward.cpp
@@ -40,7 +40,7 @@ Tensor& _pdist_forward_out(
       out);

   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "_pdist_forward.out";
+  static constexpr auto name = "_pdist_forward.out";

   ET_SWITCH_FLOATHBF16_TYPES(
       in_type, ctx, name, CTYPE, [&] { pdist<CTYPE>(in, out, p); });
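Each hunk in this stretch sits next to an ET_SWITCH_*_TYPES call. These macros map a runtime ScalarType to a compile-time C++ type and run the caller's lambda once with CTYPE bound accordingly, which is how the kernels stamp out one typed inner loop per dtype. A simplified sketch of the idiom, assuming a cut-down ScalarType (the real macros cover many more dtypes and report unsupported ones through ctx):

    #include <cstdint>

    enum class ScalarType { Float, Int, Long };

    template <typename T>
    struct type_tag { using type = T; };

    template <typename Fn>
    void switch_real_types(ScalarType t, Fn&& fn) {
      switch (t) {
        case ScalarType::Float: fn(type_tag<float>{});   break;
        case ScalarType::Int:   fn(type_tag<int32_t>{}); break;
        case ScalarType::Long:  fn(type_tag<int64_t>{}); break;
      }
    }

    // Usage, mirroring the kernels above:
    //   switch_real_types(in_type, [&](auto tag) {
    //     using CTYPE = typename decltype(tag)::type;
    //     /* typed inner loop */
    //   });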
diff --git a/kernels/portable/cpu/op_prod.cpp b/kernels/portable/cpu/op_prod.cpp
index 54580459d7c..ba76a1f200c 100644
--- a/kernels/portable/cpu/op_prod.cpp
+++ b/kernels/portable/cpu/op_prod.cpp
@@ -32,7 +32,7 @@ Tensor& prod_out(
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();

-  constexpr auto name = "prod.int_out";
+  static constexpr auto name = "prod.int_out";

   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
@@ -72,7 +72,7 @@ Tensor& prod_int_out(
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();

-  constexpr auto name = "prod.int_out";
+  static constexpr auto name = "prod.int_out";

   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
diff --git a/kernels/portable/cpu/op_reflection_pad1d.cpp b/kernels/portable/cpu/op_reflection_pad1d.cpp
index 5f1b68e210d..a591368f44e 100644
--- a/kernels/portable/cpu/op_reflection_pad1d.cpp
+++ b/kernels/portable/cpu/op_reflection_pad1d.cpp
@@ -44,7 +44,7 @@ Tensor& reflection_pad1d_out(
       out);

   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "reflection_pad1d.out";
+  static constexpr auto name = "reflection_pad1d.out";

   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad1d<CTYPE>(reflection_ix, in, out, padding);
diff --git a/kernels/portable/cpu/op_reflection_pad2d.cpp b/kernels/portable/cpu/op_reflection_pad2d.cpp
index 821d09253c9..6ef3ad7bff6 100644
--- a/kernels/portable/cpu/op_reflection_pad2d.cpp
+++ b/kernels/portable/cpu/op_reflection_pad2d.cpp
@@ -44,7 +44,7 @@ Tensor& reflection_pad2d_out(
       out);

   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "reflection_pad2d.out";
+  static constexpr auto name = "reflection_pad2d.out";

   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad2d<CTYPE>(reflection_ix, in, out, padding);
diff --git a/kernels/portable/cpu/op_reflection_pad3d.cpp b/kernels/portable/cpu/op_reflection_pad3d.cpp
index cb0dd39a071..57338dd47d3 100644
--- a/kernels/portable/cpu/op_reflection_pad3d.cpp
+++ b/kernels/portable/cpu/op_reflection_pad3d.cpp
@@ -44,7 +44,7 @@ Tensor& reflection_pad3d_out(
       out);

   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "reflection_pad3d.out";
+  static constexpr auto name = "reflection_pad3d.out";

   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad3d<CTYPE>(reflection_ix, in, out, padding);
diff --git a/kernels/portable/cpu/op_repeat_interleave.cpp b/kernels/portable/cpu/op_repeat_interleave.cpp
index 50da02c5646..a7b9f18f434 100644
--- a/kernels/portable/cpu/op_repeat_interleave.cpp
+++ b/kernels/portable/cpu/op_repeat_interleave.cpp
@@ -72,7 +72,7 @@ Tensor& repeat_interleave_Tensor_out(

   int64_t repeats_sum = 0;

-  constexpr auto name = "repeat_interleave.Tensor_out";
+  static constexpr auto name = "repeat_interleave.Tensor_out";

   ET_SWITCH_TWO_TYPES(Int, Long, repeats.scalar_type(), ctx, name, CTYPE, [&] {
     const CTYPE* repeats_data = repeats.const_data_ptr<CTYPE>();
diff --git a/kernels/portable/cpu/op_replication_pad1d.cpp b/kernels/portable/cpu/op_replication_pad1d.cpp
index 0b38c4f1540..5d4b907adac 100644
--- a/kernels/portable/cpu/op_replication_pad1d.cpp
+++ b/kernels/portable/cpu/op_replication_pad1d.cpp
@@ -36,7 +36,7 @@ Tensor& replication_pad1d_out(
       out);

   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "replication_pad1d.out";
+  static constexpr auto name = "replication_pad1d.out";

   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad1d<CTYPE>(replication_ix, in, out, padding);
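The reflection and replication pad kernels share one driver per dimensionality and differ only in the index-mapping function (reflection_ix vs. replication_ix) passed as the first argument. A sketch of that pattern with illustrative signatures; ExecuTorch's actual helpers differ:

    #include <cstdint>

    // Map an output coordinate, which may fall inside the padded border,
    // back to an input coordinate.
    inline int64_t reflect(int64_t i, int64_t size) {
      if (i < 0) i = -i;                   // mirror across the left edge
      if (i >= size) i = 2 * size - 2 - i; // mirror across the right edge
      return i;
    }

    inline int64_t replicate(int64_t i, int64_t size) {
      if (i < 0) return 0;            // clamp: repeat the edge value
      if (i >= size) return size - 1;
      return i;
    }

    template <typename CTYPE, typename IndexFn>
    void pad1d_sketch(IndexFn ix, const CTYPE* in, CTYPE* out,
                      int64_t in_size, int64_t pad_left, int64_t pad_right) {
      const int64_t out_size = in_size + pad_left + pad_right;
      for (int64_t o = 0; o < out_size; ++o) {
        out[o] = in[ix(o - pad_left, in_size)];
      }
    }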
auto name = "replication_pad2d.out"; ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] { pad2d(replication_ix, in, out, padding); diff --git a/kernels/portable/cpu/op_replication_pad3d.cpp b/kernels/portable/cpu/op_replication_pad3d.cpp index f23bde05beb..12e82d53167 100644 --- a/kernels/portable/cpu/op_replication_pad3d.cpp +++ b/kernels/portable/cpu/op_replication_pad3d.cpp @@ -36,7 +36,7 @@ Tensor& replication_pad3d_out( out); ScalarType in_type = in.scalar_type(); - constexpr auto name = "replication_pad3d.out"; + static constexpr auto name = "replication_pad3d.out"; ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] { pad3d(replication_ix, in, out, padding); diff --git a/kernels/portable/cpu/op_roll.cpp b/kernels/portable/cpu/op_roll.cpp index 109be64fbed..4d314b3d191 100644 --- a/kernels/portable/cpu/op_roll.cpp +++ b/kernels/portable/cpu/op_roll.cpp @@ -80,7 +80,7 @@ Tensor& roll_out( size_t dim_shift_array_length = static_cast(in.dim()); // NOLINT IntArrayRef dim_shifts(dim_shift_array, dim_shift_array_length); - constexpr auto name = "roll.out"; + static constexpr auto name = "roll.out"; ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] { const CTYPE* in_data = in.const_data_ptr(); diff --git a/kernels/portable/cpu/op_scalar_tensor.cpp b/kernels/portable/cpu/op_scalar_tensor.cpp index bff4ecc318c..8136400a18f 100644 --- a/kernels/portable/cpu/op_scalar_tensor.cpp +++ b/kernels/portable/cpu/op_scalar_tensor.cpp @@ -22,7 +22,7 @@ scalar_tensor_out(KernelRuntimeContext& ctx, const Scalar& s, Tensor& out) { ScalarType out_type = out.scalar_type(); - constexpr auto name = "scalar_tensor.out"; + static constexpr auto name = "scalar_tensor.out"; ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE, [&]() { auto opt_val_casted = utils::internal::check_overflow_scalar_cast(s); diff --git a/kernels/portable/cpu/op_scatter.cpp b/kernels/portable/cpu/op_scatter.cpp index 42d40c8284d..a93839193fe 100644 --- a/kernels/portable/cpu/op_scatter.cpp +++ b/kernels/portable/cpu/op_scatter.cpp @@ -119,7 +119,7 @@ Tensor& scatter_src_out( ET_KERNEL_CHECK( ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out); - constexpr auto name = "scatter.src_out"; + static constexpr auto name = "scatter.src_out"; ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() { scatter_src_helper(in, dim, index, src, out); @@ -146,7 +146,7 @@ Tensor& scatter_value_out( ET_KERNEL_CHECK( ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out); - constexpr auto name = "scatter.value_out"; + static constexpr auto name = "scatter.value_out"; ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() { auto opt_val = utils::internal::check_overflow_scalar_cast(value); diff --git a/kernels/portable/cpu/op_topk.cpp b/kernels/portable/cpu/op_topk.cpp index bdea02f83bc..3082bc94662 100644 --- a/kernels/portable/cpu/op_topk.cpp +++ b/kernels/portable/cpu/op_topk.cpp @@ -6,14 +6,14 @@ * LICENSE file in the root directory of this source tree. */ -#include -#include -#include - #include #include #include +#include +#include +#include + namespace torch { namespace executor { namespace native { @@ -117,14 +117,18 @@ void perform_topk( queue[i].second = i; } - // Perform topk on the queue - const auto elem_greater = [](const elem_t& x, const elem_t& y) -> bool { + // Perform topk on the queue, explict typing for the lambda to satisfy + // msvc compiler. 
diff --git a/kernels/portable/cpu/op_topk.cpp b/kernels/portable/cpu/op_topk.cpp
index bdea02f83bc..3082bc94662 100644
--- a/kernels/portable/cpu/op_topk.cpp
+++ b/kernels/portable/cpu/op_topk.cpp
@@ -6,14 +6,14 @@
  * LICENSE file in the root directory of this source tree.
  */

-#include <algorithm>
-#include <cmath>
-#include <tuple>
-
 #include <executorch/kernels/portable/cpu/util/math_util.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
 #include <executorch/runtime/platform/assert.h>

+#include <algorithm>
+#include <cmath>
+#include <tuple>
+
 namespace torch {
 namespace executor {
 namespace native {
@@ -117,14 +117,18 @@ void perform_topk(
     queue[i].second = i;
   }

-  // Perform topk on the queue
-  const auto elem_greater = [](const elem_t& x, const elem_t& y) -> bool {
+  // Perform topk on the queue; explicit typing for the lambdas to satisfy
+  // the MSVC compiler.
+  bool (*elem_greater)(const elem_t&, const elem_t&) =
+      [](const elem_t& x, const elem_t& y) -> bool {
     return float_less_than(y.first, x.first);
   };
-  const auto elem_less = [](const elem_t& x, const elem_t& y) -> bool {
+  bool (*elem_less)(const elem_t&, const elem_t&) =
+      [](const elem_t& x, const elem_t& y) -> bool {
     return float_less_than(x.first, y.first);
   };
-  const auto cmp = largest ? elem_greater : elem_less;
+  bool (*cmp)(const elem_t&, const elem_t&) =
+      largest ? elem_greater : elem_less;
   if (use_partial_sort) {
     std::partial_sort(queue, queue + k, queue + dim_size, cmp);
   } else {
@@ -187,7 +191,7 @@ std::tuple<Tensor&, Tensor&> topk_values(
       InvalidArgument,
       out);

-  constexpr auto name = "topk.values";
+  static constexpr auto name = "topk.values";

   if (in.numel() == 0 || (k == 0 && in.dim() > 0)) {
     return out;
diff --git a/kernels/portable/cpu/op_var.cpp b/kernels/portable/cpu/op_var.cpp
index fcaa79a54fe..202d7df80bc 100644
--- a/kernels/portable/cpu/op_var.cpp
+++ b/kernels/portable/cpu/op_var.cpp
@@ -91,7 +91,7 @@ Tensor& var_out(
   const size_t num = get_reduced_dim_product(in, dim_list);
   const size_t denom = unbiased ? num - 1 : num;

-  constexpr auto name = "var.out";
+  static constexpr auto name = "var.out";

   ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_FLOATHBF16_TYPES(out.scalar_type(), ctx, name, CTYPE_OUT, [&] {
@@ -123,7 +123,7 @@ Tensor& var_correction_out(
       InvalidArgument,
       out);

-  constexpr auto name = "var.correction_out";
+  static constexpr auto name = "var.correction_out";

   double correction_val = 1;
   if (correction.has_value()) {
diff --git a/kernels/portable/cpu/op_view_as_real_copy.cpp b/kernels/portable/cpu/op_view_as_real_copy.cpp
index 4a2803eded0..4461ecb02f8 100644
--- a/kernels/portable/cpu/op_view_as_real_copy.cpp
+++ b/kernels/portable/cpu/op_view_as_real_copy.cpp
@@ -64,7 +64,7 @@ Tensor& view_as_real_copy_out(
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(self, out), InvalidArgument, out);

-  constexpr auto op_name = "view_as_real_copy.out";
+  static constexpr auto op_name = "view_as_real_copy.out";

   ET_SWITCH_COMPLEXH_TYPES(self.scalar_type(), ctx, op_name, CTYPE_IN, [&] {
     ET_SWITCH_FLOATH_TYPES(out.scalar_type(), ctx, op_name, CTYPE_OUT, [&] {
diff --git a/kernels/portable/cpu/util/CMakeLists.txt b/kernels/portable/cpu/util/CMakeLists.txt
index 047760f321e..eabf3add9b0 100644
--- a/kernels/portable/cpu/util/CMakeLists.txt
+++ b/kernels/portable/cpu/util/CMakeLists.txt
@@ -21,7 +21,10 @@ endif()

 list(TRANSFORM _kernels_util_all_deps__srcs PREPEND "${EXECUTORCH_ROOT}/")

-set(_common_compile_options -Wno-deprecated-declarations)
+set(_common_compile_options
+    $<$<CXX_COMPILER_ID:MSVC>:/wd4996>
+    $<$<NOT:$<CXX_COMPILER_ID:MSVC>>:-Wno-deprecated-declarations>
+)

 add_library(kernels_util_all_deps ${_kernels_util_all_deps__srcs})
 target_link_libraries(kernels_util_all_deps PRIVATE executorch_core)
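The op_topk.cpp change above is the one behavioral workaround in this patch rather than a pure annotation. Each lambda has its own closure type, so `largest ? elem_greater : elem_less` only type-checks because both capture-less lambdas convert to the same function-pointer type; per the new comment, MSVC rejects that when the operands are `const auto` closures, so the patch spells the pointer type out. A minimal reproduction of the idea:

    // Two lambdas are two distinct types; pinning them to one function-pointer
    // type makes the ternary's result type explicit.
    using cmp_fn = bool (*)(int, int);

    cmp_fn pick(bool greater) {
      cmp_fn a = [](int x, int y) { return x > y; };
      cmp_fn b = [](int x, int y) { return x < y; };
      return greater ? a : b; // both operands already have type cmp_fn
    }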
diff --git a/kernels/portable/cpu/util/elementwise_util.h b/kernels/portable/cpu/util/elementwise_util.h
index cc1110e10d7..f5a29f71256 100644
--- a/kernels/portable/cpu/util/elementwise_util.h
+++ b/kernels/portable/cpu/util/elementwise_util.h
@@ -85,7 +85,7 @@ inline void dtype_specialized_elementwise_fn_impl(
   static_assert(
       (std::is_same_v<Args, std::pair<const Tensor*, SupportedTensorDtypes>> &&
        ...));
-  constexpr auto kNumInputs = sizeof...(inputs);
+  static constexpr auto kNumInputs = sizeof...(inputs);
   // All inputs must be of type CTYPE_COMPUTE.
   ET_DCHECK(
       ((inputs.first->scalar_type() ==
@@ -229,7 +229,7 @@ inline void apply_elementwise_fn_generic_impl(
     const Tensor& out,
     SupportedTensorDtypes out_dtypes,
     Args... inputs) {
-  constexpr auto kNumInputs = sizeof...(inputs);
+  static constexpr auto kNumInputs = sizeof...(inputs);

   struct InputInfo {
     load_to_compute_fn<CTYPE_COMPUTE> load_to_compute;
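The static_assert in dtype_specialized_elementwise_fn_impl pairs a C++17 fold expression with sizeof... to validate and count a parameter pack at compile time, which is also why kNumInputs can be (static) constexpr. A compact illustration:

    #include <cstddef>
    #include <type_traits>

    template <typename... Args>
    constexpr std::size_t count_and_check(Args...) {
      // Fold `&&` over the pack: every Args must be int.
      static_assert((std::is_same_v<Args, int> && ...),
                    "every argument must be an int");
      return sizeof...(Args); // plays the role of kNumInputs
    }

    static_assert(count_and_check(1, 2, 3) == 3);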
diff --git a/runtime/core/exec_aten/util/tensor_util.h b/runtime/core/exec_aten/util/tensor_util.h
index 67a813f8d34..9b490da244c 100644
--- a/runtime/core/exec_aten/util/tensor_util.h
+++ b/runtime/core/exec_aten/util/tensor_util.h
@@ -66,7 +66,7 @@
  * dimension of all the tensors as the upper bound for the for loop.
  */
 #define ET_CHECK_SAME_SHAPE2(a__, b__)                                     \
-  ({                                                                       \
+  do {                                                                     \
     const size_t a_numel__ = (a__).numel();                                \
     const size_t b_numel__ = (b__).numel();                                \
     const size_t a_dim__ = (a__).dim();                                    \
@@ -89,10 +89,10 @@
         a_size__,                                                          \
         b_size__);                                                         \
   }                                                                        \
-  })
+  } while (0)

 #define ET_CHECK_SAME_SHAPE3(a__, b__, c__)                                \
-  ({                                                                       \
+  do {                                                                     \
     const size_t a_numel__ = (a__).numel();                                \
     const size_t b_numel__ = (b__).numel();                                \
     const size_t c_numel__ = (c__).numel();                                \
@@ -124,11 +124,11 @@
         b_size__,                                                          \
         c_size__);                                                         \
   }                                                                        \
-  })
+  } while (0)

 /// Asserts that all tensors have the same dtype.
 #define ET_CHECK_SAME_DTYPE2(a__, b__)                                     \
-  ({                                                                       \
+  do {                                                                     \
     const ::executorch::aten::ScalarType a_type__ = (a__).scalar_type();   \
     const ::executorch::aten::ScalarType b_type__ = (b__).scalar_type();   \
     ET_CHECK_MSG(                                                          \
@@ -136,10 +136,10 @@
         ET_TENSOR_CHECK_PREFIX__ ": dtype={%" PRId8 ", %" PRId8 "}",       \
         static_cast<int8_t>(a_type__),                                     \
         static_cast<int8_t>(b_type__));                                    \
-  })
+  } while (0)

 #define ET_CHECK_SAME_DTYPE3(a__, b__, c__)                                \
-  ({                                                                       \
+  do {                                                                     \
     const ::executorch::aten::ScalarType a_type__ = (a__).scalar_type();   \
     const ::executorch::aten::ScalarType b_type__ = (b__).scalar_type();   \
     const ::executorch::aten::ScalarType c_type__ = (c__).scalar_type();   \
@@ -150,7 +150,7 @@
         static_cast<int8_t>(a_type__),                                     \
         static_cast<int8_t>(b_type__),                                     \
         static_cast<int8_t>(c_type__));                                    \
-  })
+  } while (0)

 /**
  * Asserts that all tensors have the same shape and dtype.
@@ -159,7 +159,7 @@
  * macros independently, because it only calls ET_CHECK_MSG once.
  */
 #define ET_CHECK_SAME_SHAPE_AND_DTYPE2(a__, b__)                           \
-  ({                                                                       \
+  do {                                                                     \
     const size_t a_numel__ = (a__).numel();                                \
     const size_t b_numel__ = (b__).numel();                                \
     const size_t a_dim__ = (a__).dim();                                    \
@@ -189,10 +189,10 @@
         a_size__,                                                          \
         b_size__);                                                         \
   }                                                                        \
-  })
+  } while (0)

 #define ET_CHECK_SAME_SHAPE_AND_DTYPE3(a__, b__, c__)                      \
-  ({                                                                       \
+  do {                                                                     \
     const size_t a_numel__ = (a__).numel();                                \
     const size_t b_numel__ = (b__).numel();                                \
     const size_t c_numel__ = (c__).numel();                                \
@@ -233,13 +233,13 @@
         b_size__,                                                          \
         c_size__);                                                         \
   }                                                                        \
-  })
+  } while (0)

 /**
  * Assert that the input tensor is contiguous tensor.
  */
 #define ET_CHECK_CONTIGUOUS(a__)                                           \
-  ({                                                                       \
+  do {                                                                     \
     const ::executorch::aten::ArrayRef<executorch::aten::StridesType>      \
         strides = a__.strides();                                           \
     const ::executorch::aten::ArrayRef<executorch::aten::SizesType> sizes = \
@@ -260,7 +260,7 @@
             strides[i - 1],                                                \
             strides[i] * sizes[i]);                                        \
       }                                                                    \
-  })
+  } while (0)

 /**
  * Assert the input two tensors share same strides.
@@ -268,7 +268,7 @@
  * of any input tensors.
  */
 #define ET_CHECK_SAME_STRIDES2(a__, b__)                                   \
-  ({                                                                       \
+  do {                                                                     \
     ET_CHECK_MSG(                                                          \
         a__.dim() == b__.dim(),                                            \
         "Two tensors shall have same number of strides, but not %zu and %zu.", \
@@ -288,7 +288,7 @@
         (int32_t)a_strides[i],                                             \
         (int32_t)b_strides[i]);                                            \
   }                                                                        \
-  })
+  } while (0)

 /**
  * Assert the input three tensors share same strides.
@@ -296,7 +296,7 @@
  * of any input tensors.
  */
 #define ET_CHECK_SAME_STRIDES3(a__, b__, c__)                              \
-  ({                                                                       \
+  do {                                                                     \
     ET_CHECK_MSG(                                                          \
         a__.dim() == b__.dim() && b__.dim() == c__.dim(),                  \
         "Three tensors shall have same number of strides, "                \
@@ -322,17 +322,17 @@
         (int32_t)b_strides[i],                                             \
         (int32_t)c_strides[i]);                                            \
   }                                                                        \
-  })
+  } while (0)

 #define ET_CHECK_DEFAULT_OR_CHANNELSLAST_DIMORDER(t__)                     \
-  ({                                                                       \
+  do {                                                                     \
     ET_CHECK_MSG(                                                          \
         is_contiguous_dim_order(                                           \
             t__.dim_order().data(), t__.dim_order().size()) ||             \
         is_channels_last_dim_order(                                        \
             t__.dim_order().data(), t__.dim_order().size()),               \
         "Tensor must have default or channels last dim order");            \
-  })
+  } while (0)

 /**
  * DEPRECATED: Please use ET_CHECK_OR_RETURN_FALSE instead and provide
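The tensor_util.h hunks above drop GNU statement expressions, the `({ ... })` construct, which GCC and Clang support as an extension but MSVC does not implement. None of these macros used the expression's value, so the portable `do { ... } while (0)` wrapper is behavior-preserving, and it keeps each macro a single statement so call sites still compose with unbraced if/else:

    #include <cassert>

    // do { ... } while (0) makes the macro body one statement that demands a
    // trailing semicolon, exactly like a function call.
    #define CHECK_POSITIVE(x) \
      do {                    \
        assert((x) > 0);      \
      } while (0)

    void use(int n, bool verbose) {
      if (verbose)
        CHECK_POSITIVE(n); // a bare { ... } block here would leave a stray
      else                 // ';' and detach the else branch
        assert(n == 0);
    }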