[WIP] Add support to (de)serialize Result output names #28595

Open · wants to merge 3 commits into base: master
13 changes: 13 additions & 0 deletions src/core/dev_api/openvino/core/descriptor_tensor.hpp
@@ -88,5 +88,18 @@ struct OPENVINO_API TensorExtension {
* @param input_descriptor Input descriptor to set in output as shared tensor.
*/
OPENVINO_API void set_shared_tensor(Output& output_descriptor, const Input& input_descriptor);

/**
 * @brief Retrieves the set of output names assigned to the tensor descriptor.
 *
 * This function returns the tensor descriptor's names:
 * - the same set as ov::descriptor::Tensor::get_names() for a regular descriptor.
 * - the output-specific names for a shared tensor.
 *
 * @param descriptor The tensor descriptor to get the names from.
 * @return The set of output names.
 */
OPENVINO_API const std::unordered_set<std::string>& get_assigned_names(const Tensor& descriptor);

} // namespace descriptor
} // namespace ov
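
For reference, a minimal usage sketch of the new helper (hypothetical caller, not part of the patch; assumes the dev-API header above is available):

#include <iostream>

#include "openvino/core/descriptor_tensor.hpp"

// Prints the names assigned to a tensor descriptor. For a regular
// descriptor this matches Tensor::get_names(); for a shared tensor it
// yields only the Result-specific output names.
void print_assigned_names(const ov::descriptor::Tensor& tensor) {
    for (const auto& name : ov::descriptor::get_assigned_names(tensor)) {
        std::cout << name << '\n';
    }
}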
13 changes: 13 additions & 0 deletions src/core/src/descriptor/shared_tensor.cpp
@@ -90,6 +90,10 @@ class SharedTensor : public ITensorDescriptor {
}
}

const auto& get_output_names() const {
return m_output_names;
}

private:
void rm_tensor_output_names() {
auto names = m_shared_tensor->get_names();
@@ -121,5 +125,14 @@ void set_shared_tensor(Output& output, const Input& input) {
}
}

const std::unordered_set<std::string>& get_assigned_names(const Tensor& tensor) {
if (auto&& descriptor = TensorExtension::get_descriptor(tensor);
auto&& shared_tensor = dynamic_cast<const SharedTensor*>(&descriptor)) {
return shared_tensor->get_output_names();
} else {
return descriptor.get_names();
}
}

} // namespace descriptor
} // namespace ov
84 changes: 49 additions & 35 deletions src/core/src/pass/serialize.cpp
@@ -13,6 +13,7 @@
#include <unordered_set>

#include "openvino/core/coordinate_diff.hpp"
#include "openvino/core/descriptor_tensor.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/meta_data.hpp"
#include "openvino/core/model.hpp"
@@ -1076,47 +1077,60 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
}
}
// <layers/output>
if ((node->get_output_size() > 0) && !ov::op::util::is_output(node)) {
pugi::xml_node output = layer.append_child("output");
for (auto& o : node->outputs()) {
pugi::xml_node port = output.append_child("port");
port.append_attribute("id").set_value(port_id++);

const auto& rt_info = o.get_tensor().get_rt_info();
auto port_element_type =
is_fp16_compression_postponed(rt_info) ? ov::element::f16 : o.get_element_type();
if (node->get_output_size() > 0) {
auto serialize_tensor_names = [](const std::unordered_set<std::string>& names) -> std::string {
auto sorted_names = std::vector<std::string>(names.begin(), names.end());
std::sort(sorted_names.begin(), sorted_names.end());

std::string serialized_names;
for (const auto& name : sorted_names) {
if (!serialized_names.empty())
serialized_names += ",";
serialized_names += escape_delim(name);
}
return serialized_names;
};

if (ov::op::util::is_output(node)) {
if (version > 10 && !deterministic) {
// Do not serialize output names in deterministic mode (hash computation): the attribute is
// optional for v11 and does not affect the model structure or behavior.
if (const auto& names = ov::descriptor::get_assigned_names(node->get_output_tensor(0));
!names.empty()) {
layer.append_attribute("output_names").set_value(serialize_tensor_names(names).c_str());
}
}
} else {
pugi::xml_node output = layer.append_child("output");
for (auto& o : node->outputs()) {
pugi::xml_node port = output.append_child("port");
port.append_attribute("id").set_value(port_id++);

port.append_attribute("precision").set_value(get_precision_name(port_element_type).c_str());
const auto& rt_info = o.get_tensor().get_rt_info();
auto port_element_type =
is_fp16_compression_postponed(rt_info) ? ov::element::f16 : o.get_element_type();

// Sort tensor names
const auto& tensor_names = o.get_tensor().get_names();
std::vector<std::string> vector_names(tensor_names.begin(), tensor_names.end());
sort(vector_names.begin(), vector_names.end());
port.append_attribute("precision").set_value(get_precision_name(port_element_type).c_str());

std::string names;
for (const auto& name : vector_names) {
if (!names.empty())
names += ",";
names += escape_delim(name);
}
if (!names.empty()) {
port.append_attribute("names").set_value(names.c_str());
}
if (const auto& tensor_names = o.get_tensor().get_names(); !tensor_names.empty()) {
port.append_attribute("names").set_value(serialize_tensor_names(tensor_names).c_str());
}

for (const auto& d : o.get_partial_shape()) {
pugi::xml_node dim = port.append_child("dim");
if (d.is_dynamic()) {
dim.append_child(pugi::xml_node_type::node_pcdata).set_value("-1");
} else {
dim.append_child(pugi::xml_node_type::node_pcdata)
.set_value(std::to_string(d.get_length()).c_str());
for (const auto& d : o.get_partial_shape()) {
pugi::xml_node dim = port.append_child("dim");
if (d.is_dynamic()) {
dim.append_child(pugi::xml_node_type::node_pcdata).set_value("-1");
} else {
dim.append_child(pugi::xml_node_type::node_pcdata)
.set_value(std::to_string(d.get_length()).c_str());
}
}
if (version >= 11)
append_runtime_info(port, o.get_rt_info());
}
if (node_type_name == "TensorIterator" || node_type_name == "Loop") {
layer.insert_move_after(output, layer.first_child());
}
if (version >= 11)
append_runtime_info(port, o.get_rt_info());
}
if (node_type_name == "TensorIterator" || node_type_name == "Loop") {
layer.insert_move_after(output, layer.first_child());
}
}
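
To make the on-disk format concrete, here is a standalone sketch of what the serialize_tensor_names lambda above produces (simplified re-implementation for illustration; assumes escape_delim escapes ',' as "\,"):

#include <algorithm>
#include <string>
#include <unordered_set>
#include <vector>

// Joins tensor names into the IR attribute format: sorted lexicographically,
// comma-separated, with literal commas inside a name escaped as "\,".
std::string join_names(const std::unordered_set<std::string>& names) {
    std::vector<std::string> sorted(names.begin(), names.end());
    std::sort(sorted.begin(), sorted.end());

    std::string out;
    for (const auto& name : sorted) {
        if (!out.empty())
            out += ',';
        for (const char c : name) {
            if (c == ',')
                out += "\\,";  // escape the delimiter
            else
                out += c;
        }
    }
    return out;
}

// join_names({"output", "identity,output"}) yields "identity\,output,output".

Sorting the names keeps the attribute stable across runs, which is what keeps the serialized XML deterministic.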

39 changes: 39 additions & 0 deletions src/core/tests/pass/serialization/tensor_names.cpp
@@ -9,8 +9,13 @@
#include "common_test_utils/test_common.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/pass/serialize.hpp"
#include "openvino/util/common_util.hpp"
#include "read_ir.hpp"

namespace ov::test {
using op::v0::Parameter, op::v0::Result, op::v0::Relu;
using testing::UnorderedElementsAre;

class TensorNameSerializationTest : public ov::test::TestsCommon {
protected:
std::string m_out_xml_path;
@@ -55,3 +60,37 @@ TEST_F(TensorNameSerializationTest, SerializeFunctionWithTensorNames) {
const auto res = fc.compare(result, model);
EXPECT_TRUE(res.valid) << res.message;
}

TEST_F(TensorNameSerializationTest, model_with_specific_output_names) {
const auto make_test_model = [] {
auto input = std::make_shared<Parameter>(element::f32, Shape{1, 3, 10, 10});
input->set_friendly_name("input");
input->output(0).set_names({"input"});
auto relu = std::make_shared<Relu>(input);
relu->set_friendly_name("relu");
relu->output(0).set_names({"relu"});
auto result = std::make_shared<Result>(relu);
result->set_friendly_name("output");
result->output(0).set_names({"output", "identity,output"});
return std::make_shared<ov::Model>(ResultVector{result}, ParameterVector{input}, "Specific output names");
};
const auto model_comparator = FunctionsComparator::with_default()
.enable(FunctionsComparator::ATTRIBUTES)
.enable(FunctionsComparator::CONST_VALUES);

const auto ref_model = make_test_model();
ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(ref_model);
const auto read_model = ov::test::readModel(m_out_xml_path, m_out_bin_path);

// Explicitly check the output names
EXPECT_THAT(ref_model->output(0).get_node()->get_input_tensor(0).get_names(),
UnorderedElementsAre("output", "identity,output", "relu"));
EXPECT_THAT(ref_model->output(0).get_names(), UnorderedElementsAre("output", "identity,output"));
EXPECT_THAT(read_model->output(0).get_node()->get_input_tensor(0).get_names(),
UnorderedElementsAre("output", "identity,output", "relu"));
EXPECT_THAT(read_model->output(0).get_names(), UnorderedElementsAre("output", "identity,output"));

const auto res = model_comparator.compare(read_model, ref_model);
EXPECT_TRUE(res.valid) << res.message;
}
} // namespace ov::test
59 changes: 38 additions & 21 deletions src/frontends/ir/src/ir_deserializer.cpp
@@ -6,6 +6,7 @@

#include <pugixml.hpp>
#include <regex>
#include <string_view>

#include "openvino/core/descriptor_tensor.hpp"
#include "openvino/core/except.hpp"
@@ -33,6 +34,32 @@

using namespace ov::util;

namespace {
/**
 * @brief Deserializes tensor names.
 *
 * Tensor names are separated by commas; an escaped comma ("\,") inside a name is restored to a literal comma.
 *
 * @param tensor_names A string view of the serialized tensor names.
 * @return A set of unique tensor names.
 */
std::unordered_set<std::string> deserialize_tensor_names(const std::string_view& tensor_names) {
// tensor names are separated by comma, but ignore escaped comma
static const auto splitter = std::regex(R"((?:[^\\,\n]|\\.)+)");

auto output_names = std::unordered_set<std::string>();
std::transform(std::cregex_token_iterator{tensor_names.data(), tensor_names.data() + tensor_names.size(), splitter},
std::cregex_token_iterator{},
std::inserter(output_names, output_names.end()),
[](const auto& token) {
// If the tensor name contains an escaped comma, replace it with a literal comma
static const auto escaped_delim = std::regex(R"(\\,)");
return std::regex_replace(token.str(), escaped_delim, ",");
});
return output_names;
}
} // namespace
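
A quick round-trip check of the helper above (hypothetical GTest snippet; the helper is file-local in the patch, so assume it is made visible to the test):

#include <gtest/gtest.h>

#include <string>
#include <unordered_set>

TEST(DeserializeTensorNames, unescapes_commas) {
    // "identity\,output,output" encodes two names: "identity,output" and "output".
    const auto names = deserialize_tensor_names(R"(identity\,output,output)");
    EXPECT_EQ(names, (std::unordered_set<std::string>{"identity,output", "output"}));
}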

ov::XmlDeserializer::IoMap ov::XmlDeserializer::updated_io_map(const pugi::xml_node& node,
const pugi::xml_node& body_node) {
if (body_node.empty()) {
@@ -763,21 +790,8 @@ ov::GenericLayerParams ov::XmlDeserializer::parse_generic_params(const pugi::xml
type = ov::element::Type(preStr);
}
port.precision = type;
std::vector<std::string> names;
if (getParameters<std::string>(parentNode, "names", names)) {
for (size_t i = 0; i < names.size(); i++) {
std::string name = names[i];
// Restore original name if it contains delimiter
// getParameters(...) returns the vector of names which were split by delimiter ','
// but some names can contain ',' as a part of name, in this case we use '\' to
// escape delimiter the cycle below is needed in order to find names which contained
// delimiter and restore the original name
while (i < names.size() && names[i].at(names[i].length() - 1) == '\\') {
name.replace(names[i].length() - 1, 1, ",");
name += names[++i];
}
port.names.emplace(name);
}
if (auto names = parentNode.attribute("names"); !names.empty()) {
port.names = deserialize_tensor_names(names.value());
}
return port;
};
@@ -1026,14 +1040,17 @@ std::shared_ptr<ov::Node> ov::XmlDeserializer::create_node(const std::vector<ov:
}
}

// The IR does not store information about dedicated output names for Result node (model output),
// If the IR has no information about dedicated output names for the Result node (model output),
// assume all names from the parent node are the Result's (model's) tensor names.
// Consider adding dedicated RT info with information about Result's output names.
if (auto result = ov::as_type<ov::op::v0::Result>(ovNode.get())) {
if (!ov::op::util::is_parameter(result->get_input_source_output(0).get_node())) {
// Copy names if parent node is not parameter, model's input names should not be dedicated
// output names as they could be removed from Parameter's tensor during model transformations.
result->get_output_tensor(0).add_names(result->get_input_tensor(0).get_names());
if (const auto names = node.attribute("output_names"); names.empty()) {
if (!ov::op::util::is_parameter(result->get_input_source_output(0).get_node())) {
// Copy names only if the parent node is not a Parameter: the model's input names should not become
// dedicated output names, as they could be removed from the Parameter's tensor during model transformations.
result->get_output_tensor(0).add_names(result->get_input_tensor(0).get_names());
}
} else {
result->get_output_tensor(0).set_names(deserialize_tensor_names(names.value()));
}
}
}
66 changes: 66 additions & 0 deletions src/frontends/ir/tests/frontend_test_basic.cpp
@@ -1404,6 +1404,72 @@ TEST_F(IRFrontendTests, name_with_comma) {
EXPECT_NE(it, names.end());
}

TEST_F(IRFrontendTests, model_output_name_with_comma) {
std::string testModel = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32" names="input">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="relu\,t, identity_t">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1" output_names="relu\,t,custom\,name">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
<edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
</edges>
</net>
)V0G0N";

std::shared_ptr<ov::Model> model;
OV_ASSERT_NO_THROW(model = core.read_model(testModel, ov::Tensor()));
ASSERT_TRUE(!!model);

{
const auto output_tensor = model->output("custom,name");
EXPECT_EQ(output_tensor.get_names().size(), 2);
EXPECT_EQ(output_tensor.get_node()->get_friendly_name(), "output");
}
{
const auto output_tensor = model->output("relu,t");
EXPECT_EQ(output_tensor.get_node()->get_friendly_name(), "output");
}
}

TEST_F(IRFrontendTests, DetectionOutput) {
std::string testModel = R"V0G0N(
<net name="DetectionOutput" version="11">