Fix performance-inefficiency Coverity scan issues in CPU plugin #28291

Merged · 2 commits · Jan 7, 2025
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/compiled_model.cpp
@@ -132,7 +132,7 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
ov::hint::SchedulingCoreType::ANY_CORE,
false,
true,
- sub_streams_table,
+ std::move(sub_streams_table),
sub_cfg.streamsRankTable[i]};
m_sub_compiled_models.push_back(
std::make_shared<CompiledModel>(model, plugin, sub_cfg, loaded_from_cache, m_sub_memory_manager));
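The compiled_model.cpp change above is the pattern Coverity reports as COPY_INSTEAD_OF_MOVE: on its last use, a container can transfer its buffer instead of being deep-copied. A minimal sketch of the idea, with hypothetical names rather than the actual OpenVINO types:

```cpp
#include <utility>
#include <vector>

struct StreamsConfig {
    std::vector<std::vector<int>> sub_streams_table;  // potentially large
    int rank;
};

StreamsConfig make_config(std::vector<std::vector<int>> table, int rank) {
    // 'table' is not read again after this point, so std::move lets the
    // aggregate steal its heap buffers instead of deep-copying them.
    return StreamsConfig{std::move(table), rank};
}
```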
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/config.cpp
@@ -311,7 +311,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
}
} else if (key == ov::cache_encryption_callbacks.name()) {
try {
- auto encryption_callbacks = val.as<EncryptionCallbacks>();
+ const auto& encryption_callbacks = val.as<EncryptionCallbacks>();
cacheEncrypt = encryption_callbacks.encrypt;
cacheDecrypt = encryption_callbacks.decrypt;
} catch (ov::Exception&) {
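The `const auto&` in config.cpp addresses Coverity's AUTO_CAUSES_COPY: plain `auto` deduces a value type, so when the right-hand side yields a reference (assuming, as here, that `as<T>()` can return a reference to the stored object), the whole object is silently copied. A hedged sketch with stand-in types:

```cpp
#include <functional>
#include <string>

struct EncryptionCallbacks {
    std::function<std::string(const std::string&)> encrypt;
    std::function<std::string(const std::string&)> decrypt;
};

const EncryptionCallbacks& stored_callbacks() {
    static const EncryptionCallbacks cb{};
    return cb;
}

void read_properties() {
    // auto cb = stored_callbacks();      // deduces a value type: copies
    //                                    // both std::function objects
    const auto& cb = stored_callbacks();  // binds to the stored object
    auto encrypt = cb.encrypt;            // copy only the member we keep
    (void)encrypt;
}
```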
14 changes: 7 additions & 7 deletions src/plugins/intel_cpu/src/cpu_memory.cpp
@@ -84,7 +84,7 @@ Memory::Memory(const dnnl::engine& eng, const MemoryDesc& desc, const void* data
Memory::Memory(const dnnl::engine& eng, MemoryDescPtr desc, MemoryBlockPtr block)
: m_eng(eng),
m_pMemDesc(desc),
- m_blockHandle(block, this),
+ m_blockHandle(std::move(block), this),
dnnlMemHandle(this) {
if (desc->getPrecision() == element::string) {
OPENVINO_THROW("[CPU] Memory object can't be created for string data.");
@@ -95,7 +95,7 @@ Memory::Memory(const dnnl::engine& eng, MemoryDescPtr desc, MemoryBlockPtr block
}

Memory::Memory(const dnnl::engine& eng, const MemoryDesc& desc, MemoryBlockPtr block)
- : Memory::Memory(eng, desc.clone(), block) {}
+ : Memory::Memory(eng, desc.clone(), std::move(block)) {}

size_t Memory::getSize() const {
auto size = getDesc().getCurrentMemSize();
@@ -592,14 +592,14 @@ bool mbind_move(const dnnl::memory mem, int numaNodeID) {
}

MemoryPtr split_horizontal(const dnnl::engine& eng,
- const MemoryPtr src,
+ const MemoryPtr& src,
int dim,
int w_rank,
int w_size,
bool need_fill) {
auto desc = src->getDescPtr();
auto shape = src->getShape();
- auto dims = shape.getDims();
+ const auto& dims = shape.getDims();
auto prec = src->getPrecision();
if (dim < 0) {
dim += dims.size();
@@ -655,14 +655,14 @@ MemoryPtr split_horizontal(const dnnl::engine& eng,
}

MemoryPtr split_vertical(const dnnl::engine& eng,
- const MemoryPtr src,
+ const MemoryPtr& src,
int dim,
int w_rank,
int w_size,
bool need_fill) {
auto desc = src->getDescPtr();
- auto shape = src->getShape();
- auto dims = shape.getDims();
+ const auto& shape = src->getShape();
+ const auto& dims = shape.getDims();
auto prec = src->getPrecision();
if (dim < 0) {
dim += dims.size();
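The split_horizontal/split_vertical signatures now take std::shared_ptr by const reference. Passing a shared_ptr by value costs an atomic reference-count increment and decrement per call; a const reference costs nothing and is safe whenever the callee merely observes the pointer for the duration of the call. A minimal illustration with hypothetical types:

```cpp
#include <cstdio>
#include <memory>

struct Tensor { float data[16]; };
using TensorPtr = std::shared_ptr<Tensor>;

// By value: atomic ref-count bump on entry, release on exit.
float first_by_value(TensorPtr t) { return t->data[0]; }

// By const reference: no ref-count traffic. Fine here because the
// function neither stores the pointer nor outlives the caller's copy.
float first_by_ref(const TensorPtr& t) { return t->data[0]; }

int main() {
    auto t = std::make_shared<Tensor>();
    std::printf("%f %f\n", first_by_value(t), first_by_ref(t));
    return 0;
}
```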
6 changes: 3 additions & 3 deletions src/plugins/intel_cpu/src/cpu_memory.h
@@ -122,7 +122,7 @@ using MemoryBlockCPtr = std::shared_ptr<const IMemoryBlockObserver>;

class DnnlMemBlockHandle {
public:
- DnnlMemBlockHandle(MemoryBlockPtr pBlock, Memory* pMem) : m_pMemBlock(pBlock), m_pMem(pMem) {
+ DnnlMemBlockHandle(MemoryBlockPtr pBlock, Memory* pMem) : m_pMemBlock(std::move(pBlock)), m_pMem(pMem) {
if (m_pMemBlock) {
m_pMemBlock->registerMemory(m_pMem);
}
@@ -447,13 +447,13 @@ bool mbind_move(const MemoryCPtr mem, int numaNodeID);
bool mbind_move(const dnnl::memory mem, int numaNodeID);

MemoryPtr split_horizontal(const dnnl::engine& eng,
- const MemoryPtr src,
+ const MemoryPtr& src,
int dim,
int w_rank,
int w_size,
bool need_fill = true);
MemoryPtr split_vertical(const dnnl::engine& eng,
- const MemoryPtr src,
+ const MemoryPtr& src,
int dim,
int w_rank,
int w_size,
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/edge.cpp
@@ -300,7 +300,7 @@ void Edge::allocate(MemoryBlockPtr memBlock) {

auto allocateFunc = [OV_CAPTURE_CPY_AND_THIS](const MemoryDesc& inputDesc) -> MemoryPtr {
auto parentPtr = getParent();
- return std::make_shared<Memory>(parentPtr->getEngine(), inputDesc, memBlock);
+ return std::make_shared<Memory>(parentPtr->getEngine(), inputDesc, std::move(memBlock));
};

allocateCommon(allocateFunc);
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/graph.cpp
@@ -193,7 +193,7 @@ void Graph::Replicate(const std::shared_ptr<const ov::Model>& model,
}

// Add stub output node for unused data
- for (auto unusedOutput : unusedOutputs) {
+ for (const auto& unusedOutput : unusedOutputs) {
auto parentNode = op2node[unusedOutput.get_node_shared_ptr()];
const auto port = unusedOutput.get_index();
const auto nodeName =
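The graph.cpp loop previously copied an ov::Output handle on every iteration; `for (const auto& x : c)` binds each element in place. The same effect in miniature:

```cpp
#include <cstddef>
#include <string>
#include <vector>

size_t total_length(const std::vector<std::string>& names) {
    size_t total = 0;
    // 'const auto&' observes each element in place; a plain 'auto'
    // loop variable would copy every string just to read its size.
    for (const auto& name : names) {
        total += name.size();
    }
    return total;
}
```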
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/graph_context.cpp
@@ -19,7 +19,7 @@ GraphContext::GraphContext(const Config& config,
weightsCache(std::move(w_cache)),
isGraphQuantizedFlag(isGraphQuantized),
streamExecutor(streamExecutor),
- subMemoryManager(sub_memory_manager),
+ subMemoryManager(std::move(sub_memory_manager)),
memoryStatesRegister(std::make_shared<node::MemoryStatesRegister>()),
networkMemoryControl(std::make_shared<NetworkMemoryControl>()) {
rtParamsCache = std::make_shared<MultiCache>(config.rtCacheCapacity);
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/infer_request.cpp
@@ -273,7 +273,7 @@ void SyncInferRequest::change_default_ptr(Graph& graph) {
: // then swap internal buffer to avoid data corruption
controlBlock.currentMemBlock(); // else reuse the existing buffer

- outputMemBlock->setMemBlockResize(memBlock);
+ outputMemBlock->setMemBlockResize(std::move(memBlock));
DEBUG_LOG("reset proxy ",
outputMemBlock,
", actual ",
6 changes: 4 additions & 2 deletions src/plugins/intel_cpu/src/memory_control.cpp
@@ -16,7 +16,9 @@ namespace {

class StaticPartitionMemoryBlock : public IMemoryBlockObserver {
public:
- StaticPartitionMemoryBlock(MemoryBlockPtr pBlock, ptrdiff_t offset) : m_pBlock(pBlock), m_offset(offset) {
+ StaticPartitionMemoryBlock(MemoryBlockPtr pBlock, ptrdiff_t offset)
+     : m_pBlock(std::move(pBlock)),
+       m_offset(offset) {
OPENVINO_ASSERT(m_pBlock, "Memory block is uninitialized");
}

@@ -410,7 +412,7 @@ edgeClusters MemoryControl::findEdgeClusters(const std::vector<EdgePtr>& graphEd
}

MemoryControl& NetworkMemoryControl::createMemoryControlUnit(std::vector<size_t> syncInds) {
- m_controlUnits.emplace_back(std::unique_ptr<MemoryControl>(new MemoryControl(syncInds)));
+ m_controlUnits.emplace_back(std::unique_ptr<MemoryControl>(new MemoryControl(std::move(syncInds))));
return *(m_controlUnits.back());
}

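createMemoryControlUnit already took its std::vector<size_t> by value; the fix completes the sink-argument idiom by moving it into the MemoryControl constructor, so lvalue callers pay one copy and rvalue callers none. Sketch with hypothetical names:

```cpp
#include <utility>
#include <vector>

class ControlUnit {
public:
    // Sink argument: accept by value, then move into the member.
    explicit ControlUnit(std::vector<size_t> syncInds)
        : m_syncInds(std::move(syncInds)) {}

private:
    std::vector<size_t> m_syncInds;
};

int main() {
    std::vector<size_t> inds{0, 3, 7};
    ControlUnit keep(inds);             // one copy; 'inds' still valid
    ControlUnit steal(std::move(inds)); // zero copies; buffer transferred
    return 0;
}
```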
6 changes: 3 additions & 3 deletions src/plugins/intel_cpu/src/nodes/composite.cpp
@@ -46,7 +46,7 @@ void Composite::selectOptimalPrimitiveDescriptor() {
for (size_t i = 0; i < getParentEdges().size(); i++) {
auto desc = getParentOutputMemDesc(getParentEdgeAt(i));
inConfs.emplace_back(desc);
- graphInputConfig.emplace_back(node::Input::InputConfig{desc, true});
+ graphInputConfig.emplace_back(node::Input::InputConfig{std::move(desc), true});
}

std::vector<Input::OutputConfig> graphOutputConfig;
@@ -65,7 +65,7 @@ void Composite::selectOptimalPrimitiveDescriptor() {
outConfs.emplace_back(desc);
}

- const NodeConfig config(inConfs, outConfs);
+ const NodeConfig config(std::move(inConfs), std::move(outConfs));

supportedPrimitiveDescriptors.clear();
supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::undef);
@@ -99,7 +99,7 @@ void Composite::execute(dnnl::stream) {
}

void Composite::executeDynamicImpl(dnnl::stream strm) {
- execute(strm);
+ execute(std::move(strm));

// since the shape inference is not performed for the composite node
// a memory of the extra child edges, attached to the output ports
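executeDynamicImpl receives its dnnl::stream by value and forwards it; since dnnl::stream is a thin handle around a shared, reference-counted object, std::move on the parameter's last use skips one ref-count round trip. The shape of the pattern, using a stand-in handle type rather than the real dnnl API:

```cpp
#include <memory>
#include <utility>

// Stand-in for dnnl::stream: a thin wrapper over a shared handle.
struct Stream {
    std::shared_ptr<void> handle;
};

void execute(Stream strm) {
    (void)strm;  // consumes its own copy of the handle
}

void execute_dynamic(Stream strm) {
    // 'strm' is our by-value parameter and is never used again, so
    // moving it into execute() avoids one atomic ref-count bump.
    execute(std::move(strm));
}
```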
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
@@ -220,7 +220,7 @@ void FullyConnected::needPrepareParamsForTensorParallel() {
};

int dim = -1;
- auto dst_shape = dstMemoryBuffer->getShape();
+ const auto& dst_shape = dstMemoryBuffer->getShape();
auto dst_desc = dstMemoryBuffer->getDescPtr();
auto dims = dst_shape.getDims();
if (dim < 0) {
@@ -625,7 +625,7 @@ void FullyConnected::needUpdateTensorParalelConfig() {
// 1. weight shape is dynamic
// 2. last dim can be splited.
if (tp_cfg.enable_tensor_parallel) {
- auto& shape = getSrcMemoryAtPort(WEIGHTS)->getShape();
+ const auto& shape = getSrcMemoryAtPort(WEIGHTS)->getShape();
if (shape.isDynamic()) {
tp_cfg.enable_tensor_parallel = false;
} else if (shape.getDims()[0] < static_cast<size_t>(tp_cfg.w_size)) {
3 changes: 1 addition & 2 deletions src/plugins/intel_cpu/src/nodes/llm_mlp.cpp
@@ -224,9 +224,8 @@ class LinearGateUp {
blkN++;
}
if (blkN) {
- auto shared_atomic = std::make_shared<std::atomic_int>(0);
auto& work = works[ithr];
- work.sync_flag = shared_atomic;
+ work.sync_flag = std::make_shared<std::atomic_int>(0);
work.blk_K_size = cache_blk_k_size;

work.n0 = (start_blkN)*REG_BLK_N_SIZE;
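The llm_mlp.cpp change drops the shared_atomic temporary: assigning the make_shared result straight to the field move-assigns the freshly created pointer, whereas the named variable forced an extra ref-count increment plus destruction of the temporary. The same change in isolation:

```cpp
#include <atomic>
#include <memory>

struct Work {
    std::shared_ptr<std::atomic_int> sync_flag;
};

void init(Work& work) {
    // Before: a named temporary, then a copy into the member (extra
    // atomic increment, then destruction of the temporary):
    //   auto shared_atomic = std::make_shared<std::atomic_int>(0);
    //   work.sync_flag = shared_atomic;
    // After: the prvalue is move-assigned directly into place.
    work.sync_flag = std::make_shared<std::atomic_int>(0);
}
```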
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/pad.cpp
@@ -192,7 +192,7 @@ void Pad::createPrimitive() {
prepareParams();
if (padValue) {
// restore original memory object
- srcMemory[PAD_VALUE_ID] = padValue;
+ srcMemory[PAD_VALUE_ID] = std::move(padValue);
}

updateLastInputDims();
7 changes: 3 additions & 4 deletions src/plugins/intel_cpu/src/nodes/reorder.cpp
@@ -296,7 +296,7 @@ void Reorder::createReorderPrimitive(const DnnlMemoryDescPtr& srcDesc, const Dnn
// useful in situations when rank in IR does not much rank that is required by the oneDNN primitive,
// but the input tensor can be reshaped (e.g. weights for grouped convolutions, biases etc.)
if (srcDesc->hasLayoutType(LayoutType::ncsp) && srcDesc->getShape().getRank() != dstDesc->getShape().getRank()) {
- const auto newDims = dstDesc->getShape().getStaticDims();
+ const auto& newDims = dstDesc->getShape().getStaticDims();
const auto newFormat = DnnlExtensionUtils::GetPlainFormatByRank(newDims.size());

src_desc = dnnl::memory::desc(DnnlExtensionUtils::convertToDnnlDims(newDims),
@@ -307,9 +307,8 @@
DEBUG_LOG("CreateReorderPrimitive is called for node", getName(), " src desc: ", src_desc, " dst_desc: ", dst_desc);
CPU_NODE_ASSERT(src_desc.get_ndims() == dst_desc.get_ndims(),
"OneDNN doesn't support reorder with different ranks.");
- auto result = getReorderPrim(context->getParamsCache(), getEngine(), src_desc, dst_desc);
- CPU_NODE_ASSERT(result, "could not create reorder primitive: unsupported reorder case.");
- prim = result;
+ prim = getReorderPrim(context->getParamsCache(), getEngine(), src_desc, dst_desc);
+ CPU_NODE_ASSERT(prim, "could not create reorder primitive: unsupported reorder case.");

selectedPD->setImplementationType(
parse_impl_name(DnnlExtensionUtils::query_impl_info_str(prim.get_primitive_desc())));
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/rms_norm.h
@@ -23,7 +23,7 @@ class RMSNorm : public Node {
return false;
}
void executeDynamicImpl(dnnl::stream strm) override {
- execute(strm);
+ execute(std::move(strm));
}
void initSupportedPrimitiveDescriptors() override;
void execute(dnnl::stream strm) override;
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/rnn.cpp
@@ -1030,7 +1030,7 @@ void RNN::prepareMemory(const DnnlMemoryDescPtr& new_desc, size_t idx) {
res_ptr = create();
}

- internalBlobMemory[idx] = res_ptr;
+ internalBlobMemory[idx] = std::move(res_ptr);
}

void RNN::copyWeightsData() {
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp
@@ -55,7 +55,7 @@ bool StringTensorPack::needPrepareParams() const {
}

void StringTensorPack::executeDynamicImpl(dnnl::stream strm) {
- execute(strm);
+ execute(std::move(strm));
}

template <class T_idx>
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp
@@ -64,7 +64,7 @@ void StringTensorUnpack::executeDynamicImpl(dnnl::stream strm) {
totalCharLength += srcData[i].length();
}
redefineOutputMemory({srcDataDims, srcDataDims, {totalCharLength}});
- execute(strm);
+ execute(std::move(strm));
}

void StringTensorUnpack::execute(dnnl::stream strm) {
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/partitioned_mem_blk.h
@@ -19,7 +19,7 @@ class PartitionedMemoryBlock : public IMemoryBlockObserver {
size_t total_chunks = 1,
ptrdiff_t offset_chunks = 0,
size_t size_chunks = 1)
- : m_pBlock(pBlock),
+ : m_pBlock(std::move(pBlock)),
m_total_chunks(total_chunks),
m_offset_chunks(offset_chunks),
m_size_chunks(size_chunks) {
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/plugin.cpp
@@ -544,7 +544,7 @@ std::shared_ptr<ov::ICompiledModel> Plugin::import_model(std::istream& model_str
CacheDecrypt decrypt{codec_xor};
bool decript_from_string = false;
if (config.count(ov::cache_encryption_callbacks.name())) {
- auto encryption_callbacks = config.at(ov::cache_encryption_callbacks.name()).as<EncryptionCallbacks>();
+ const auto& encryption_callbacks = config.at(ov::cache_encryption_callbacks.name()).as<EncryptionCallbacks>();
decrypt.m_decrypt_str = encryption_callbacks.decrypt;
decript_from_string = true;
}
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/proxy_mem_blk.cpp
@@ -14,7 +14,7 @@ void ProxyMemoryBlock::setMemBlock(std::shared_ptr<IMemoryBlock> pBlock) {
return;
}

- m_pMemBlock = pBlock;
+ m_pMemBlock = std::move(pBlock);
notifyUpdate();
}

@@ -24,7 +24,7 @@ void ProxyMemoryBlock::setMemBlockResize(std::shared_ptr<IMemoryBlock> pBlock) {
return;
}

- m_pMemBlock = pBlock;
+ m_pMemBlock = std::move(pBlock);
m_pMemBlock->resize(m_size);
notifyUpdate();
}
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/proxy_mem_blk.h
@@ -17,7 +17,7 @@ class ProxyMemoryBlock : public IMemoryBlockObserver {
ProxyMemoryBlock() : m_pOrigBlock(std::make_shared<MemoryBlockWithReuse>()), m_pMemBlock(m_pOrigBlock) {}
explicit ProxyMemoryBlock(std::shared_ptr<IMemoryBlock> pBlock) {
OPENVINO_ASSERT(pBlock, "Memory block is uninitialized");
- m_pMemBlock = pBlock;
+ m_pMemBlock = std::move(pBlock);
}

void* getRawPtr() const noexcept override;
@@ -13,7 +13,7 @@ namespace node {

class RMSNormShapeInferFactory : public ShapeInferFactory {
public:
- RMSNormShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
+ RMSNormShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(std::move(op)) {}
ShapeInferPtr makeShapeInfer() const override;

private: