
Commit ed91560

[CPU] Apply 'cppcoreguidelines-pro-type-cstyle-cast' clang-tidy remarks (openvinotoolkit#30037)
### Details:
- Fix "cppcoreguidelines-pro-type-cstyle-cast" remarks reported by clang-tidy
- Enable the "cppcoreguidelines-pro-type-cstyle-cast" clang-tidy check on CI by default

### Tickets:
- N/A
1 parent c4b0645 commit ed91560
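For context, `cppcoreguidelines-pro-type-cstyle-cast` flags every C-style cast and asks for the equivalent named cast, so that const removal, pointer reinterpretation, and value conversion are each spelled out. A minimal sketch of the before/after shape, with hypothetical names rather than code from this commit:

```cpp
#include <cstdint>

void example(const std::uint8_t* code, double ratio) {
    // Before: (void (*)())code — one C-style cast silently combines const
    // removal and an object-to-function-pointer reinterpretation.
    // After: each conversion is explicit and greppable.
    auto fn = reinterpret_cast<void (*)()>(const_cast<std::uint8_t*>(code));
    static_cast<void>(fn);

    // Before: (int)ratio. After: the narrowing conversion is explicit.
    int truncated = static_cast<int>(ratio);
    static_cast<void>(truncated);
}
```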

File tree

5 files changed: +17 −13 lines changed


src/plugins/intel_cpu/src/.clang-tidy

Lines changed: 1 addition & 0 deletions
@@ -52,6 +52,7 @@ Checks: >
     modernize-*,
     misc-*,
     cppcoreguidelines-prefer-member-initializer,
+    cppcoreguidelines-pro-type-cstyle-cast,
     readability-container-size-empty,
     readability-else-after-return,
     -bugprone-easily-swappable-parameters,
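Where a C-style cast has to stay (for example near generated or third-party code), clang-tidy's standard `NOLINT` comment can silence just this check on that line. A hypothetical example, not part of this commit:

```cpp
// Deliberate C-style cast kept; only this check is suppressed on this line.
auto* legacy = (char*)nullptr;  // NOLINT(cppcoreguidelines-pro-type-cstyle-cast)
```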

src/plugins/intel_cpu/src/dnnl_postops_composer.cpp

Lines changed: 2 additions & 2 deletions
@@ -940,8 +940,8 @@ void DnnlPostOpsComposer::appendAttrPostOpsLegacy(const FakeQuantizePostOp& post
         }

         return ops.append_binarization(dnnl::algorithm::binarization_depthwise,
-                                       (const float*)&binarizationThresholds[0],
-                                       (const float*)&binarizationOutputMask[0]);
+                                       reinterpret_cast<const float*>(binarizationThresholds.data()),
+                                       reinterpret_cast<const float*>(binarizationOutputMask.data()));
     }

     dnnl::algorithm alg = postOp.type() == FakeQuantizePostOp::Type::quantization_only
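Beyond swapping the cast, using `.data()` instead of `&v[0]` avoids undefined behaviour when the vector is empty. A minimal sketch of the same pattern with hypothetical names:

```cpp
#include <cstdint>
#include <vector>

// Hypothetical: a buffer stored as raw 32-bit words that a consumer reads as floats.
const float* as_float_ptr(const std::vector<std::uint32_t>& words) {
    // &words[0] would be undefined behaviour for an empty vector;
    // data() returns a (possibly null) pointer either way.
    return reinterpret_cast<const float*>(words.data());
}
```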

src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp

Lines changed: 1 addition & 1 deletion
@@ -164,7 +164,7 @@ BrgemmCopyBKernel::BrgemmCopyBKernel(const BrgemmCopyBKernelConfig& conf)
 status_t BrgemmCopyBKernel::create_kernel() {
     const auto code = jit_generator::create_kernel();
     OV_CPU_JIT_EMITTER_ASSERT(code == status::success, "Failed to create kernel");
-    ker_ = (decltype(ker_))jit_ker();
+    ker_ = reinterpret_cast<decltype(ker_)>(const_cast<uint8_t*>(jit_ker()));
     return code;
 }
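The replacement is mechanical but worth spelling out: as the added `const_cast` shows, `jit_ker()` returns a pointer to const generated code, so turning it into a callable requires both dropping const and reinterpreting the pointer type, which the old C-style cast did in one opaque step. A rough sketch of the pattern with hypothetical types (the same shape reappears in `color_convert.cpp` and `input.cpp` below):

```cpp
#include <cstdint>

// Hypothetical stand-ins for the kernel signature and the generated-code pointer.
using kernel_fn_t = void (*)(const void* args);

kernel_fn_t to_kernel(const std::uint8_t* jit_code) {
    // (kernel_fn_t)jit_code hid two conversions; the named casts make the
    // const removal and the code-pointer reinterpretation visible.
    return reinterpret_cast<kernel_fn_t>(const_cast<std::uint8_t*>(jit_code));
}
```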

src/plugins/intel_cpu/src/nodes/color_convert.cpp

Lines changed: 1 addition & 1 deletion
@@ -127,7 +127,7 @@ void jit_uni_converter::init() {
     if (create_kernel() != status::success) {
         OPENVINO_THROW("Can't generate jit color converter kernel");
     }
-    _fn = (function_t)jit_ker();
+    _fn = reinterpret_cast<function_t>(const_cast<uint8_t*>(jit_ker()));
 }

 template <size_t N>

src/plugins/intel_cpu/src/nodes/input.cpp

Lines changed: 12 additions & 9 deletions
@@ -37,7 +37,9 @@ struct jit_has_special_value_base : public jit_generator {
     }

     fn_t get() {
-        return jit_ker() || create_kernel() == dnnl::impl::status::success ? (fn_t)jit_ker() : nullptr;
+        return jit_ker() || create_kernel() == dnnl::impl::status::success
+                   ? reinterpret_cast<fn_t>(const_cast<uint8_t*>(jit_ker()))
+                   : nullptr;
     }

 protected:
@@ -199,9 +201,9 @@ struct jit_has_subnormals : public jit_has_special_value_base {

         // Initialize necessary consts
         uni_vpxor(zero, zero, zero);
-        mov(reg_mask_addr, (size_t)exponent_mask_data);
+        mov(reg_mask_addr, reinterpret_cast<size_t>(exponent_mask_data));
         uni_vmovdqu(exponent_mask, ptr[reg_mask_addr]);
-        mov(reg_mask_addr, (size_t)mantissa_mask_data);
+        mov(reg_mask_addr, reinterpret_cast<size_t>(mantissa_mask_data));
         uni_vmovdqu(mantissa_mask, ptr[reg_mask_addr]);

         // Main loop
@@ -273,9 +275,9 @@ struct jit_has_bf16_overflows : public jit_has_special_value_base {

         // Initialize necessary consts
         uni_vpxor(zero, zero, zero);
-        mov(reg_mask_addr, (size_t)bf16_max_mask_data);
+        mov(reg_mask_addr, reinterpret_cast<size_t>(bf16_max_mask_data));
         uni_vmovdqu(bf16_max_mask, ptr[reg_mask_addr]);
-        mov(reg_mask_addr, (size_t)bf16_min_mask_data);
+        mov(reg_mask_addr, reinterpret_cast<size_t>(bf16_min_mask_data));
         uni_vmovdqu(bf16_min_mask, ptr[reg_mask_addr]);

         // Main loop
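In these two hunks the mask tables' addresses are loaded into a register as integer immediates, so the conversion is pointer-to-integer rather than pointer-to-pointer; `reinterpret_cast<size_t>` is the named-cast spelling of that conversion. A small sketch with a hypothetical constant table, outside any JIT context:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical constant table whose address gets embedded as an immediate.
alignas(16) static const std::uint32_t exponent_mask[4] = {0x7f800000U, 0x7f800000U,
                                                           0x7f800000U, 0x7f800000U};

std::size_t mask_address() {
    // Pointer-to-integer conversion: reinterpret_cast replaces the old
    // (size_t)exponent_mask_data style C-style cast.
    return reinterpret_cast<std::size_t>(exponent_mask);
}
```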
@@ -417,10 +419,11 @@ void Input::cloneBlobIfRequired() {
         std::atomic<bool> has_bf16_overflows_local(false);
         if (needFlushDenormalsToZero || do_bf16_saturation_check) {
             parallel_for(iterations_num, [&](int n) {
-                auto ptr = u32data + n * batch_size;
-                jit_has_special_value_base::args_t args = {reinterpret_cast<float const*>(ptr),
-                                                           std::min(batch_size, (size_t)(u32data + size - ptr)),
-                                                           false};
+                auto ptr = f32data + n * batch_size;
+                jit_has_special_value_base::args_t args = {
+                    reinterpret_cast<const float*>(ptr),
+                    std::min(batch_size, static_cast<size_t>(f32data + size - ptr)),
+                    false};

                 if (needFlushDenormalsToZero && !has_subnormals_local) {
                     fn(&args);
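In this last hunk the remaining-element count is a pointer difference, which yields a signed `ptrdiff_t`; `static_cast<size_t>` makes the conversion to an unsigned count explicit where the old code used a C-style `(size_t)` cast. The base pointer also switches from `u32data` to `f32data`, presumably keeping `ptr`, `batch_size`, and the tail computation in the same element type that the args struct consumes. A minimal sketch of that tail computation with hypothetical buffers:

```cpp
#include <algorithm>
#include <cstddef>

// Hypothetical: number of elements processed by iteration n, clamped at the buffer end.
std::size_t chunk_elements(const float* f32data, std::size_t size, std::size_t batch_size, int n) {
    const float* ptr = f32data + n * batch_size;
    // The pointer difference is a signed ptrdiff_t; the conversion to an
    // unsigned element count is made explicit with static_cast.
    return std::min(batch_size, static_cast<std::size_t>(f32data + size - ptr));
}
```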
