diff --git a/src/core/dev_api/openvino/core/descriptor_tensor.hpp b/src/core/dev_api/openvino/core/descriptor_tensor.hpp index c84d28cf180816..bc07abd98c42e6 100644 --- a/src/core/dev_api/openvino/core/descriptor_tensor.hpp +++ b/src/core/dev_api/openvino/core/descriptor_tensor.hpp @@ -88,5 +88,18 @@ struct OPENVINO_API TensorExtension { * @param input_descriptor Input descriptor to set in output as shared tensor. */ OPENVINO_API void set_shared_tensor(Output& output_descriptor, const Input& input_descriptor); + +/** + * @brief Retrieves the set of output names assigned to tensor descriptor. + * + * This function returns tensor descriptor names: + * - same as ov::descriptor::Tensor::get_names() for regular descriptor. + * - return specific output names for shared tensor. + * + * @param descriptor The tensor descriptor to get names. + * @return The set of output names. + */ +OPENVINO_API const std::unordered_set<std::string>& get_assigned_names(const Tensor& descriptor); + } // namespace descriptor } // namespace ov diff --git a/src/core/src/descriptor/shared_tensor.cpp b/src/core/src/descriptor/shared_tensor.cpp index 1f017594501c1b..9b1c7de011aedb 100644 --- a/src/core/src/descriptor/shared_tensor.cpp +++ b/src/core/src/descriptor/shared_tensor.cpp @@ -90,6 +90,10 @@ class SharedTensor : public ITensorDescriptor { } } + const auto& get_output_names() const { + return m_output_names; + } + private: void rm_tensor_output_names() { auto names = m_shared_tensor->get_names(); @@ -121,5 +125,14 @@ void set_shared_tensor(Output& output, const Input& input) { } } +const std::unordered_set<std::string>& get_assigned_names(const Tensor& tensor) { + if (auto&& descriptor = TensorExtension::get_descriptor(tensor); + auto&& shared_tensor = dynamic_cast<const SharedTensor*>(&descriptor)) { + return shared_tensor->get_output_names(); + } else { + return descriptor.get_names(); + } +} + } // namespace descriptor } // namespace ov diff --git a/src/core/src/pass/serialize.cpp b/src/core/src/pass/serialize.cpp index
d468e03db967fa..300e11d20a5fbc 100644 --- a/src/core/src/pass/serialize.cpp +++ b/src/core/src/pass/serialize.cpp @@ -13,6 +13,7 @@ #include #include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/descriptor_tensor.hpp" #include "openvino/core/except.hpp" #include "openvino/core/meta_data.hpp" #include "openvino/core/model.hpp" @@ -1076,47 +1077,60 @@ void ngfunction_2_ir(pugi::xml_node& netXml, } } // - if ((node->get_output_size() > 0) && !ov::op::util::is_output(node)) { - pugi::xml_node output = layer.append_child("output"); - for (auto& o : node->outputs()) { - pugi::xml_node port = output.append_child("port"); - port.append_attribute("id").set_value(port_id++); - - const auto& rt_info = o.get_tensor().get_rt_info(); - auto port_element_type = - is_fp16_compression_postponed(rt_info) ? ov::element::f16 : o.get_element_type(); + if (node->get_output_size() > 0) { + auto serialize_tensor_names = [](const std::unordered_set<std::string>& names) -> std::string { + auto sorted_names = std::vector<std::string>(names.begin(), names.end()); + std::sort(sorted_names.begin(), sorted_names.end()); + + std::string serialized_names; + for (const auto& name : sorted_names) { + if (!serialized_names.empty()) + serialized_names += ","; + serialized_names += escape_delim(name); + } + return serialized_names; + }; + + if (ov::op::util::is_output(node)) { + if (version > 10 && !deterministic) { + // Not serialize output names for deterministic mode (hash) computation as it is optional + // attribute for v11 and not affect on model structure or how it works + if (const auto& names = ov::descriptor::get_assigned_names(node->get_output_tensor(0)); + !names.empty()) { + layer.append_attribute("output_names").set_value(serialize_tensor_names(names).c_str()); + } + } + } else { + pugi::xml_node output = layer.append_child("output"); + for (auto& o : node->outputs()) { + pugi::xml_node port = output.append_child("port"); + port.append_attribute("id").set_value(port_id++); - 
port.append_attribute("precision").set_value(get_precision_name(port_element_type).c_str()); + const auto& rt_info = o.get_tensor().get_rt_info(); + auto port_element_type = + is_fp16_compression_postponed(rt_info) ? ov::element::f16 : o.get_element_type(); - // Sort tensor names - const auto& tensor_names = o.get_tensor().get_names(); - std::vector vector_names(tensor_names.begin(), tensor_names.end()); - sort(vector_names.begin(), vector_names.end()); + port.append_attribute("precision").set_value(get_precision_name(port_element_type).c_str()); - std::string names; - for (const auto& name : vector_names) { - if (!names.empty()) - names += ","; - names += escape_delim(name); - } - if (!names.empty()) { - port.append_attribute("names").set_value(names.c_str()); - } + if (const auto& tensor_names = o.get_tensor().get_names(); !tensor_names.empty()) { + port.append_attribute("names").set_value(serialize_tensor_names(tensor_names).c_str()); + } - for (const auto& d : o.get_partial_shape()) { - pugi::xml_node dim = port.append_child("dim"); - if (d.is_dynamic()) { - dim.append_child(pugi::xml_node_type::node_pcdata).set_value("-1"); - } else { - dim.append_child(pugi::xml_node_type::node_pcdata) - .set_value(std::to_string(d.get_length()).c_str()); + for (const auto& d : o.get_partial_shape()) { + pugi::xml_node dim = port.append_child("dim"); + if (d.is_dynamic()) { + dim.append_child(pugi::xml_node_type::node_pcdata).set_value("-1"); + } else { + dim.append_child(pugi::xml_node_type::node_pcdata) + .set_value(std::to_string(d.get_length()).c_str()); + } } + if (version >= 11) + append_runtime_info(port, o.get_rt_info()); + } + if (node_type_name == "TensorIterator" || node_type_name == "Loop") { + layer.insert_move_after(output, layer.first_child()); } - if (version >= 11) - append_runtime_info(port, o.get_rt_info()); - } - if (node_type_name == "TensorIterator" || node_type_name == "Loop") { - layer.insert_move_after(output, layer.first_child()); } } diff --git 
a/src/core/tests/pass/serialization/tensor_names.cpp b/src/core/tests/pass/serialization/tensor_names.cpp index cdbaf2d35703d5..864d4a1a29163f 100644 --- a/src/core/tests/pass/serialization/tensor_names.cpp +++ b/src/core/tests/pass/serialization/tensor_names.cpp @@ -9,8 +9,13 @@ #include "common_test_utils/test_common.hpp" #include "openvino/opsets/opset8.hpp" #include "openvino/pass/serialize.hpp" +#include "openvino/util/common_util.hpp" #include "read_ir.hpp" +namespace ov::test { +using op::v0::Parameter, op::v0::Result, op::v0::Relu; +using testing::UnorderedElementsAre; + class TensorNameSerializationTest : public ov::test::TestsCommon { protected: std::string m_out_xml_path; std::string m_out_bin_path; void SetUp() override { std::string filePrefix = ov::test::utils::generateTestFilePrefix(); m_out_xml_path = filePrefix + ".xml"; m_out_bin_path = filePrefix + ".bin"; } void TearDown() override { std::remove(m_out_xml_path.c_str()); std::remove(m_out_bin_path.c_str()); } }; @@ -55,3 +60,37 @@ TEST_F(TensorNameSerializationTest, SerializeFunctionWithTensorNames) { const auto res = fc.compare(result, model); EXPECT_TRUE(res.valid) << res.message; } + +TEST_F(TensorNameSerializationTest, model_with_specific_output_names) { + const auto make_test_model = [] { + auto input = std::make_shared<Parameter>(element::f32, Shape{1, 3, 10, 10}); + input->set_friendly_name("input"); + input->output(0).set_names({"input"}); + auto relu = std::make_shared<Relu>(input); + relu->set_friendly_name("relu"); + relu->output(0).set_names({"relu"}); + auto result = std::make_shared<Result>(relu); + result->set_friendly_name("output"); + result->output(0).set_names({"output", "identity,output"}); + return std::make_shared<Model>(ResultVector{result}, ParameterVector{input}, "Specific output names"); + }; + const auto model_comparator = FunctionsComparator::with_default() + .enable(FunctionsComparator::ATTRIBUTES) + .enable(FunctionsComparator::CONST_VALUES); + + const auto ref_model = make_test_model(); + ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(ref_model); + const auto read_model = ov::test::readModel(m_out_xml_path, m_out_bin_path); + + // Check explicitly output names + EXPECT_THAT(ref_model->output(0).get_node()->get_input_tensor(0).get_names(), + UnorderedElementsAre("output", 
"identity,output", "relu")); + EXPECT_THAT(ref_model->output(0).get_names(), UnorderedElementsAre("output", "identity,output")); + EXPECT_THAT(read_model->output(0).get_node()->get_input_tensor(0).get_names(), + UnorderedElementsAre("output", "identity,output", "relu")); + EXPECT_THAT(read_model->output(0).get_names(), UnorderedElementsAre("output", "identity,output")); + + const auto res = model_comparator.compare(read_model, ref_model); + EXPECT_TRUE(res.valid) << res.message; +} +} // namespace ov::test diff --git a/src/frontends/ir/src/ir_deserializer.cpp b/src/frontends/ir/src/ir_deserializer.cpp index 33e77d147557b0..55bc6dc65b80a6 100644 --- a/src/frontends/ir/src/ir_deserializer.cpp +++ b/src/frontends/ir/src/ir_deserializer.cpp @@ -6,6 +6,7 @@ #include #include +#include #include "openvino/core/descriptor_tensor.hpp" #include "openvino/core/except.hpp" @@ -33,6 +34,32 @@ using namespace ov::util; +namespace { +/** + * @brief The function deserialize tensor names. + * + * The each tensor names is separated be comma. The escaped commas in tensor names are replaced by actual comma. + * + * @param tensor_names A string view to serialized tensor names. + * @return A set of unique tensor names. 
+ */ +std::unordered_set<std::string> deserialize_tensor_names(const std::string_view& tensor_names) { + // tensor names are separated by comma, but ignore escaped comma + static const auto splitter = std::regex(R"((?:[^\\,\n]|\\.)+)"); + + auto output_names = std::unordered_set<std::string>(); + std::transform(std::cregex_token_iterator{tensor_names.data(), tensor_names.data() + tensor_names.size(), splitter}, + std::cregex_token_iterator{}, + std::inserter(output_names, output_names.end()), + [](const auto& token) { + // If tensor name contains escaped comma, replace it with comma + static const auto escaped_delim = std::regex(R"(\\,)"); + return std::regex_replace(token.str(), escaped_delim, ","); + }); + return output_names; +} +} // namespace + ov::XmlDeserializer::IoMap ov::XmlDeserializer::updated_io_map(const pugi::xml_node& node, const pugi::xml_node& body_node) { if (body_node.empty()) { @@ -763,21 +790,8 @@ ov::GenericLayerParams ov::XmlDeserializer::parse_generic_params(const pugi::xml type = ov::element::Type(preStr); } port.precision = type; - std::vector<std::string> names; - if (getParameters(parentNode, "names", names)) { - for (size_t i = 0; i < names.size(); i++) { - std::string name = names[i]; - // Restore original name if it contains delimiter - // getParameters(...) 
returns the vector of names which were split by delimiter ',' - // but some names can contain ',' as a part of name, in this case we use '\' to - // escape delimiter the cycle below is needed in order to find names which contained - // delimiter and restore the original name - while (i < names.size() && names[i].at(names[i].length() - 1) == '\\') { - name.replace(names[i].length() - 1, 1, ","); - name += names[++i]; - } - port.names.emplace(name); - } + if (auto names = parentNode.attribute("names"); !names.empty()) { + port.names = deserialize_tensor_names(names.value()); } return port; }; @@ -1026,14 +1040,17 @@ std::shared_ptr ov::XmlDeserializer::create_node(const std::vector(ovNode.get())) { - if (!ov::op::util::is_parameter(result->get_input_source_output(0).get_node())) { - // Copy names if parent node is not parameter, model's input names should not be dedicated - // output names as they could be removed from Parameter's tensor during model transformations. - result->get_output_tensor(0).add_names(result->get_input_tensor(0).get_names()); + if (const auto names = node.attribute("output_names"); names.empty()) { + if (!ov::op::util::is_parameter(result->get_input_source_output(0).get_node())) { + // Copy names if parent node is not parameter, model's input names should not be dedicated + // output names as they could be removed from Parameter's tensor during model transformations. 
+ result->get_output_tensor(0).add_names(result->get_input_tensor(0).get_names()); + } + } else { + result->get_output_tensor(0).set_names(deserialize_tensor_names(names.value())); } } } diff --git a/src/frontends/ir/tests/frontend_test_basic.cpp b/src/frontends/ir/tests/frontend_test_basic.cpp index 3bf6e694eba02c..c75d817e29ad2b 100644 --- a/src/frontends/ir/tests/frontend_test_basic.cpp +++ b/src/frontends/ir/tests/frontend_test_basic.cpp @@ -1404,6 +1404,72 @@ TEST_F(IRFrontendTests, name_with_comma) { EXPECT_NE(it, names.end()); } +TEST_F(IRFrontendTests, model_output_name_with_comma) { + std::string testModel = R"V0G0N( + + + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + + +)V0G0N"; + + std::shared_ptr model; + OV_ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor())); + ASSERT_TRUE(!!model); + + { + const auto output_tensor = model->output("custom,name"); + EXPECT_EQ(output_tensor.get_names().size(), 2); + EXPECT_EQ(output_tensor.get_node()->get_friendly_name(), "output"); + } + { + const auto output_tensor = model->output("relu,t"); + EXPECT_EQ(output_tensor.get_node()->get_friendly_name(), "output"); + } +} + TEST_F(IRFrontendTests, DetectionOutput) { std::string testModel = R"V0G0N(