kernels/optimized/CMakeLists.txt (5 changes: 4 additions & 1 deletion)

@@ -21,7 +21,10 @@ if(NOT EXECUTORCH_ROOT)
   set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
 endif()
 
-set(_common_compile_options -Wno-deprecated-declarations)
+set(_common_compile_options
+    $<$<CXX_COMPILER_ID:MSVC>:/wd4996>
+    $<$<NOT:$<CXX_COMPILER_ID:MSVC>>:-Wno-deprecated-declarations>
+)
 
 # Note for apple platform we can rely on Accelerate framework Will come back to
 # this
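MSVC does not accept -Wno-deprecated-declarations, and GCC/Clang do not accept /wd4996, so the generator expressions select whichever suppression matches the active compiler; the same substitution is applied to kernels/portable/cpu/util/CMakeLists.txt further down. Both flags silence the same class of diagnostic, sketched below against a hypothetical API:

// Hypothetical example of the diagnostic both options suppress: any use of
// a symbol marked deprecated. MSVC reports it as warning C4996; GCC and
// Clang report it under -Wdeprecated-declarations.
[[deprecated("use new_api() instead")]] inline int old_api() {
  return 0;
}

inline int caller() {
  return old_api(); // warns unless /wd4996 (MSVC) or
                    // -Wno-deprecated-declarations (GCC/Clang) is in effect
}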
kernels/optimized/cpu/op_bmm.cpp (3 changes: 2 additions & 1 deletion)

@@ -150,9 +150,10 @@ Tensor& opt_bmm_out(
   ET_KERNEL_CHECK(
       ctx, check_bmm_out_args(self, mat2, out), InvalidArgument, out);
 
-  constexpr auto name = "bmm.out";
   auto self_type = self.scalar_type();
 
+  static constexpr auto name = "bmm.out";
+
   if (executorch::runtime::isComplexType(self_type)) {
     ET_SWITCH_COMPLEXH_TYPES(self_type, ctx, name, CTYPE, [&]() {
       bmm_kernel<CTYPE>(self, mat2, out);
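This one-word change, constexpr to static constexpr on the op-name constant, repeats through nearly every file below. The PR does not state the motivation, so take this as an assumption: the ET_SWITCH_* macros expand to lambdas that read name, a non-static constexpr local must be captured wherever the compiler considers it odr-used, and MSVC draws that line more aggressively than GCC or Clang. A constant with static storage duration can be named from any lambda, however deeply nested, with no capture at all. A minimal sketch:

// Sketch only (hypothetical names) of the assumed motivation for
// constexpr -> static constexpr. A static local has static storage
// duration, so lambdas may refer to it without capturing it; an automatic
// constexpr local instead depends on the compiler's odr-use analysis,
// where MSVC has historically been stricter than GCC and Clang.
const char* dispatch_example() {
  static constexpr auto name = "bmm.out";
  auto outer = [] {                    // empty capture list is fine
    auto inner = [] { return name; };  // nested lambda: still fine
    return inner();
  };
  return outer();
}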
kernels/portable/cpu/op_convolution_backward.cpp (2 changes: 1 addition & 1 deletion)

@@ -305,7 +305,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> convolution_backward_out(
         ret_val);
   }
 
-  constexpr auto name = "convolution_backward.out";
+  static constexpr auto name = "convolution_backward.out";
 
   ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
     conv2d_backward_impl<CTYPE>(
kernels/portable/cpu/op_gather.cpp (2 changes: 1 addition & 1 deletion)

@@ -85,7 +85,7 @@ Tensor& gather_out(
       InvalidArgument,
       out);
 
-  constexpr auto name = "gather.out";
+  static constexpr auto name = "gather.out";
 
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() {
     gather_helper<CTYPE>(in, index, out, dim);
kernels/portable/cpu/op_masked_scatter.cpp (6 changes: 3 additions & 3 deletions)

@@ -41,13 +41,13 @@ Tensor& masked_scatter_out(
       InvalidArgument,
       out);
 
-  constexpr auto op_name = "masked_scatter.out";
-
   int64_t idx = 0;
   int64_t src_numel = src.numel();
   bool src_numel_check = true;
 
-  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, op_name, CTYPE, [&]() {
+  static constexpr auto name = "masked_scatter.out";
+
+  ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE, [&]() {
     const CTYPE* const src_data = src.const_data_ptr<CTYPE>();
     apply_binary_elementwise_fn<CTYPE, bool, CTYPE>(
         [src_data, &idx, &src_numel, &src_numel_check](
kernels/portable/cpu/op_max.cpp (2 changes: 1 addition & 1 deletion)

@@ -124,7 +124,7 @@ max_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
 
   ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);
 
-  constexpr auto name = "max.unary_out";
+  static constexpr auto name = "max.unary_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
kernels/portable/cpu/op_max_pool2d_with_indices_backward.cpp

@@ -169,7 +169,7 @@ Tensor& max_pool2d_with_indices_backward_out(
       InvalidArgument,
       grad_input);
 
-  constexpr auto name = "max_pool2d_with_indices_backward.grad_input";
+  static constexpr auto name = "max_pool2d_with_indices_backward.grad_input";
 
   ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
     max_pool_backward_impl<CTYPE, false>(grad_input, grad_output, indices);
kernels/portable/cpu/op_min.cpp (2 changes: 1 addition & 1 deletion)

@@ -124,7 +124,7 @@ min_unary_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
 
   ET_KERNEL_CHECK(ctx, canCast(in_type, out_type), InvalidArgument, out);
 
-  constexpr auto name = "min.unary_out";
+  static constexpr auto name = "min.unary_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
kernels/portable/cpu/op_native_batch_norm.cpp (4 changes: 2 additions & 2 deletions)

@@ -102,7 +102,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_training_out(
   size_t outer = getLeadingDims(in, C_dim);
   size_t inner = getTrailingDims(in, C_dim);
 
-  constexpr auto name = "native_batch_norm_legit_no_training.out";
+  static constexpr auto name = "native_batch_norm_legit_no_training.out";
 
   ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();
@@ -259,7 +259,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_stats_out(
       InvalidArgument,
       ret_val);
 
-  constexpr auto name = "_native_batch_norm_legit.no_stats_out";
+  static constexpr auto name = "_native_batch_norm_legit.no_stats_out";
 
   ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();
kernels/portable/cpu/op_native_group_norm.cpp (2 changes: 1 addition & 1 deletion)

@@ -190,7 +190,7 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_group_norm_out(
         ret_val);
   }
 
-  constexpr auto name = "native_group_norm.out";
+  static constexpr auto name = "native_group_norm.out";
 
   ET_SWITCH_FLOATHBF16_TYPES(input.scalar_type(), ctx, name, CTYPE, [&]() {
     group_norm<CTYPE>(
kernels/portable/cpu/op_pdist_forward.cpp (2 changes: 1 addition & 1 deletion)

@@ -40,7 +40,7 @@ Tensor& _pdist_forward_out(
       out);
 
   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "_pdist_forward.out";
+  static constexpr auto name = "_pdist_forward.out";
 
   ET_SWITCH_FLOATHBF16_TYPES(
       in_type, ctx, name, CTYPE, [&] { pdist<CTYPE>(in, out, p); });
kernels/portable/cpu/op_prod.cpp (4 changes: 2 additions & 2 deletions)

@@ -32,7 +32,7 @@ Tensor& prod_out(
 
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();
-  constexpr auto name = "prod.int_out";
+  static constexpr auto name = "prod.int_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
@@ -72,7 +72,7 @@ Tensor& prod_int_out(
 
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();
-  constexpr auto name = "prod.int_out";
+  static constexpr auto name = "prod.int_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
kernels/portable/cpu/op_reflection_pad1d.cpp (2 changes: 1 addition & 1 deletion)

@@ -44,7 +44,7 @@ Tensor& reflection_pad1d_out(
      out);
 
   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "reflection_pad1d.out";
+  static constexpr auto name = "reflection_pad1d.out";
 
   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad1d<CTYPE>(reflection_ix, in, out, padding);
kernels/portable/cpu/op_reflection_pad2d.cpp (2 changes: 1 addition & 1 deletion)

@@ -44,7 +44,7 @@ Tensor& reflection_pad2d_out(
      out);
 
   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "reflection_pad2d.out";
+  static constexpr auto name = "reflection_pad2d.out";
 
   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad2d<CTYPE>(reflection_ix, in, out, padding);
kernels/portable/cpu/op_reflection_pad3d.cpp (2 changes: 1 addition & 1 deletion)

@@ -44,7 +44,7 @@ Tensor& reflection_pad3d_out(
      out);
 
   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "reflection_pad3d.out";
+  static constexpr auto name = "reflection_pad3d.out";
 
   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad3d<CTYPE>(reflection_ix, in, out, padding);
kernels/portable/cpu/op_repeat_interleave.cpp (2 changes: 1 addition & 1 deletion)

@@ -72,7 +72,7 @@ Tensor& repeat_interleave_Tensor_out(
 
   int64_t repeats_sum = 0;
 
-  constexpr auto name = "repeat_interleave.Tensor_out";
+  static constexpr auto name = "repeat_interleave.Tensor_out";
 
   ET_SWITCH_TWO_TYPES(Int, Long, repeats.scalar_type(), ctx, name, CTYPE, [&] {
     const CTYPE* repeats_data = repeats.const_data_ptr<CTYPE>();
kernels/portable/cpu/op_replication_pad1d.cpp (2 changes: 1 addition & 1 deletion)

@@ -36,7 +36,7 @@ Tensor& replication_pad1d_out(
      out);
 
   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "replication_pad1d.out";
+  static constexpr auto name = "replication_pad1d.out";
 
   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad1d<CTYPE>(replication_ix, in, out, padding);
kernels/portable/cpu/op_replication_pad2d.cpp (2 changes: 1 addition & 1 deletion)

@@ -36,7 +36,7 @@ Tensor& replication_pad2d_out(
      out);
 
   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "replication_pad2d.out";
+  static constexpr auto name = "replication_pad2d.out";
 
   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad2d<CTYPE>(replication_ix, in, out, padding);
kernels/portable/cpu/op_replication_pad3d.cpp (2 changes: 1 addition & 1 deletion)

@@ -36,7 +36,7 @@ Tensor& replication_pad3d_out(
      out);
 
   ScalarType in_type = in.scalar_type();
-  constexpr auto name = "replication_pad3d.out";
+  static constexpr auto name = "replication_pad3d.out";
 
   ET_SWITCH_ALL_TYPES(in_type, ctx, name, CTYPE, [&] {
     pad3d<CTYPE>(replication_ix, in, out, padding);
kernels/portable/cpu/op_roll.cpp (2 changes: 1 addition & 1 deletion)

@@ -80,7 +80,7 @@ Tensor& roll_out(
   size_t dim_shift_array_length = static_cast<size_t>(in.dim()); // NOLINT
   IntArrayRef dim_shifts(dim_shift_array, dim_shift_array_length);
 
-  constexpr auto name = "roll.out";
+  static constexpr auto name = "roll.out";
 
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&] {
     const CTYPE* in_data = in.const_data_ptr<CTYPE>();
kernels/portable/cpu/op_scalar_tensor.cpp (2 changes: 1 addition & 1 deletion)

@@ -22,7 +22,7 @@ scalar_tensor_out(KernelRuntimeContext& ctx, const Scalar& s, Tensor& out) {
 
   ScalarType out_type = out.scalar_type();
 
-  constexpr auto name = "scalar_tensor.out";
+  static constexpr auto name = "scalar_tensor.out";
 
   ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE, [&]() {
     auto opt_val_casted = utils::internal::check_overflow_scalar_cast<CTYPE>(s);
kernels/portable/cpu/op_scatter.cpp (4 changes: 2 additions & 2 deletions)

@@ -119,7 +119,7 @@ Tensor& scatter_src_out(
   ET_KERNEL_CHECK(
       ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out);
 
-  constexpr auto name = "scatter.src_out";
+  static constexpr auto name = "scatter.src_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() {
     scatter_src_helper<CTYPE>(in, dim, index, src, out);
@@ -146,7 +146,7 @@ Tensor& scatter_value_out(
   ET_KERNEL_CHECK(
      ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out);
 
-  constexpr auto name = "scatter.value_out";
+  static constexpr auto name = "scatter.value_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() {
     auto opt_val = utils::internal::check_overflow_scalar_cast<CTYPE>(value);
kernels/portable/cpu/op_topk.cpp (19 changes: 11 additions & 8 deletions)

@@ -6,14 +6,14 @@
  * LICENSE file in the root directory of this source tree.
  */
 
-#include <c10/util/irange.h>
-#include <cmath>
-#include <tuple>
-
 #include <executorch/kernels/portable/cpu/util/math_util.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
 
+#include <c10/util/irange.h>
+#include <cmath>
+#include <tuple>
+
 namespace torch {
 namespace executor {
 namespace native {
@@ -118,13 +118,16 @@ void perform_topk(
   }
 
   // Perform topk on the queue
-  const auto elem_greater = [](const elem_t& x, const elem_t& y) -> bool {
+  bool (*elem_greater)(const elem_t&, const elem_t&) =
+      [](const elem_t& x, const elem_t& y) -> bool {
     return float_less_than(y.first, x.first);
   };
-  const auto elem_less = [](const elem_t& x, const elem_t& y) -> bool {
+  bool (*elem_less)(const elem_t&, const elem_t&) =
+      [](const elem_t& x, const elem_t& y) -> bool {
    return float_less_than(x.first, y.first);
   };
-  const auto cmp = largest ? elem_greater : elem_less;
+  bool (*cmp)(const elem_t&, const elem_t&) =
+      largest ? elem_greater : elem_less;
   if (use_partial_sort) {
     std::partial_sort(queue, queue + k, queue + dim_size, cmp);
   } else {
@@ -187,7 +190,7 @@ std::tuple<Tensor&, Tensor&> topk_values(
       InvalidArgument,
       out);
 
-  constexpr auto name = "topk.values";
+  static constexpr auto name = "topk.values";
 
   if (in.numel() == 0 || (k == 0 && in.dim() > 0)) {
     return out;
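Besides the usual name change, this file replaces const auto lambdas with explicit function-pointer types. The ternary is the giveaway: each lambda has its own distinct closure type, so largest ? elem_greater : elem_less only compiles through the captureless-lambda-to-function-pointer conversion, and declaring the operands as bool (*)(const elem_t&, const elem_t&) performs that conversion up front instead of leaving it to the conditional operator. A reduced, self-contained sketch (hypothetical elem type):

// Reduced sketch of the pattern (hypothetical types). Two lambdas never
// share a type, so a ternary over them must first convert both operands
// to a common type; spelling out the function-pointer type makes that
// conversion happen at initialization.
#include <utility>

using elem = std::pair<float, long>;
using cmp_fn = bool (*)(const elem&, const elem&);

cmp_fn pick_comparator(bool largest) {
  cmp_fn greater = [](const elem& x, const elem& y) { return y.first < x.first; };
  cmp_fn less = [](const elem& x, const elem& y) { return x.first < y.first; };
  return largest ? greater : less;  // both operands already have type cmp_fn
}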
kernels/portable/cpu/op_var.cpp (4 changes: 2 additions & 2 deletions)

@@ -91,7 +91,7 @@ Tensor& var_out(
   const size_t num = get_reduced_dim_product(in, dim_list);
   const size_t denom = unbiased ? num - 1 : num;
 
-  constexpr auto name = "var.out";
+  static constexpr auto name = "var.out";
 
   ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, name, CTYPE_IN, [&] {
     ET_SWITCH_FLOATHBF16_TYPES(out.scalar_type(), ctx, name, CTYPE_OUT, [&] {
@@ -123,7 +123,7 @@ Tensor& var_correction_out(
       InvalidArgument,
       out);
 
-  constexpr auto name = "var.correction_out";
+  static constexpr auto name = "var.correction_out";
 
   double correction_val = 1;
   if (correction.has_value()) {
kernels/portable/cpu/op_view_as_real_copy.cpp (14 changes: 7 additions & 7 deletions)

@@ -64,13 +64,13 @@ Tensor& view_as_real_copy_out(
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_dim_order(self, out), InvalidArgument, out);
 
-  constexpr auto op_name = "view_as_real_copy.out";
-
-  ET_SWITCH_COMPLEXH_TYPES(self.scalar_type(), ctx, op_name, CTYPE_IN, [&] {
-    ET_SWITCH_FLOATH_TYPES(out.scalar_type(), ctx, op_name, CTYPE_OUT, [&] {
-      _to_impl<CTYPE_IN, CTYPE_OUT>(self, out);
-    });
-  });
+  ET_SWITCH_COMPLEXH_TYPES(
+      self.scalar_type(), ctx, "view_as_real_copy.out", CTYPE_IN, [&] {
+        ET_SWITCH_FLOATH_TYPES(
+            out.scalar_type(), ctx, "view_as_real_copy.out", CTYPE_OUT, [&] {
+              _to_impl<CTYPE_IN, CTYPE_OUT>(self, out);
+            });
+      });
 
   return out;
 }

Review comment from the PR author on the removed op_name line: "oh missed one."
kernels/portable/cpu/util/CMakeLists.txt (5 changes: 4 additions & 1 deletion)

@@ -21,7 +21,10 @@ endif()
 
 list(TRANSFORM _kernels_util_all_deps__srcs PREPEND "${EXECUTORCH_ROOT}/")
 
-set(_common_compile_options -Wno-deprecated-declarations)
+set(_common_compile_options
+    $<$<CXX_COMPILER_ID:MSVC>:/wd4996>
+    $<$<NOT:$<CXX_COMPILER_ID:MSVC>>:-Wno-deprecated-declarations>
+)
 
 add_library(kernels_util_all_deps ${_kernels_util_all_deps__srcs})
 target_link_libraries(kernels_util_all_deps PRIVATE executorch_core)
kernels/portable/cpu/util/elementwise_util.h (4 changes: 2 additions & 2 deletions)

@@ -85,7 +85,7 @@ inline void dtype_specialized_elementwise_fn_impl(
   static_assert(
       (std::is_same_v<Args, std::pair<const Tensor*, SupportedTensorDtypes>> &&
        ...));
-  constexpr auto kNumInputs = sizeof...(inputs);
+  static constexpr auto kNumInputs = sizeof...(inputs);
   // All inputs must be of type CTYPE_COMPUTE.
   ET_DCHECK(
       ((inputs.first->scalar_type() ==
@@ -229,7 +229,7 @@ inline void apply_elementwise_fn_generic_impl(
     const Tensor& out,
     SupportedTensorDtypes out_dtypes,
     Args... inputs) {
-  constexpr auto kNumInputs = sizeof...(inputs);
+  static constexpr auto kNumInputs = sizeof...(inputs);
 
   struct InputInfo {
     load_to_compute_fn<CTYPE_COMPUTE> load_to_compute;
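The header applies the same treatment to a pack-size constant: sizeof...(inputs) is already a compile-time count, and a static constexpr kNumInputs can be read by the helper lambdas these functions build without being captured. A stand-alone sketch (hypothetical function):

// Hypothetical stand-alone example of the kNumInputs pattern. sizeof...
// counts a parameter pack at compile time; static storage duration lets
// the nested lambda use the constant with an empty capture list.
#include <cstddef>

template <typename... Args>
std::size_t count_inputs(Args... /*inputs*/) {
  static constexpr std::size_t kNumInputs = sizeof...(Args);
  auto report = [] { return kNumInputs; };  // no capture needed
  return report();
}

// Usage: count_inputs(1, 2.0, 'x') returns 3, computed at compile time.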