Skip to content

Commit

Permalink
Fix medium severity issues reported in CPU plugin (#28294)
Browse files Browse the repository at this point in the history
### Details:
 - Fix medium-severity Coverity issues:
   * Division or modulo by float zero: 1565219
   * Uninitialized scalar field: 1565060
   * Uninitialized pointer field: 1565135
   * Identical code for different branches: 1565044

### Tickets:
 - [CVS-157247](https://jira.devtools.intel.com/browse/CVS-157247)
  • Loading branch information
aobolensk authored Jan 7, 2025
1 parent 5059c0e commit 66f1b24
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 7 deletions.
1 change: 1 addition & 0 deletions src/plugins/intel_cpu/src/nodes/kernels/x64/rms_kernel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,7 @@ void jit_rms_kernel<isa>::generate() {
reduce_vmm_to_scalar(vmm_rsqrt, vmm_sum0, vmm_sum1, vmm_sum3, vec_size);

// mean(x^2)
OPENVINO_ASSERT(m_jcp.data_size != 0);
mov(reg_tmp.cvt32(), float2int(1.0f / m_jcp.data_size));
vmovd(xmm_tmp, reg_tmp.cvt32());
vmulss(xmm_rsqrt, xmm_rsqrt, xmm_tmp);
Expand Down
4 changes: 1 addition & 3 deletions src/plugins/intel_cpu/src/nodes/llm_mlp.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,6 @@ class LinearKsplit2 {

LinearKsplit2() {}

ReduceAdd2bh* p_jit_reduce2bh;

// weight [N, K]
// Gate & Up are interleaved in N dimension: 16-gate / 16-up
// and post-ops will compute silu(gate)*up in unit of 16 elements
Expand Down Expand Up @@ -201,7 +199,7 @@ class LinearGateUp {
bool quantized_int8 = config.gate_up_quantized;

auto reg_blk_K_size = quantized_int8 ? REG_BLK_K_SIZE_I8 : REG_BLK_K_SIZE;
auto cache_blk_k_size = quantized_int8 ? CACHE_BLK_K_SIZE : CACHE_BLK_K_SIZE;
auto cache_blk_k_size = CACHE_BLK_K_SIZE;
auto weight_element_size = quantized_int8 ? sizeof(int8_t) : sizeof(ov::float16);

// prepare weights, split N among threads
Expand Down
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/qkv_proj.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ struct QKVProjection::Executor : public QKVProjection::ExecutorBase {
// and activations will be dynamically per-token quantized and using AMX-INT8 to get the result
bool quantized_int8 = m_node->m_config.quantized;

auto cache_blk_k_size = quantized_int8 ? CACHE_BLK_K_SIZE : CACHE_BLK_K_SIZE;
auto cache_blk_k_size = CACHE_BLK_K_SIZE;
auto weight_element_size = quantized_int8 ? sizeof(int8_t) : sizeof(ov::float16);

auto K = w0.size(1);
Expand Down
6 changes: 3 additions & 3 deletions src/plugins/intel_cpu/src/nodes/scaled_attn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ struct MHAKernel {
}

PlainTensor causal_mask;
bool select_nfltmax_at_0; // set attn_score to -FLT_MAX when causal_mask[...] equal to this
bool select_nfltmax_at_0 = false; // set attn_score to -FLT_MAX when causal_mask[...] equal to this
void set_causal_mask(PlainTensor mask, bool _select_nfltmax_at_0) {
causal_mask = mask;
select_nfltmax_at_0 = _select_nfltmax_at_0;
Expand Down Expand Up @@ -526,7 +526,7 @@ struct MHAKernel<ScaledDotProductAttention::KT_ACL, T> {
}

PlainTensor causal_mask;
bool select_nfltmax_at_0; // set attn_score to -FLT_MAX when causal_mask[...] equal to this
bool select_nfltmax_at_0 = false; // set attn_score to -FLT_MAX when causal_mask[...] equal to this
void set_causal_mask(PlainTensor mask, bool _select_nfltmax_at_0) {
causal_mask = mask;
select_nfltmax_at_0 = _select_nfltmax_at_0;
Expand Down Expand Up @@ -674,7 +674,7 @@ struct MHAKernel<ScaledDotProductAttention::KT_MLAS, float> {
}

PlainTensor causal_mask;
bool select_nfltmax_at_0; // set attn_score to -FLT_MAX when causal_mask[...] equal to this
bool select_nfltmax_at_0 = false; // set attn_score to -FLT_MAX when causal_mask[...] equal to this
void set_causal_mask(PlainTensor mask, bool _select_nfltmax_at_0) {
causal_mask = mask;
select_nfltmax_at_0 = _select_nfltmax_at_0;
Expand Down

0 comments on commit 66f1b24

Please sign in to comment.