diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/transformations_pipeline.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/transformations_pipeline.hpp
index 743ca31fbee01a..ca2d4ad81dc296 100644
--- a/src/plugins/intel_gpu/include/intel_gpu/plugin/transformations_pipeline.hpp
+++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/transformations_pipeline.hpp
@@ -11,6 +11,7 @@
 
 #include "intel_gpu/runtime/execution_config.hpp"
 #include "intel_gpu/runtime/device.hpp"
+#include "transformations/convert_precision.hpp"
 
 namespace ov::intel_gpu {
 
@@ -21,6 +22,8 @@ class TransformationsPipeline {
     void apply(std::shared_ptr<ov::Model> func);
 
 private:
+    static bool fuse_type_to_convert(const std::shared_ptr<ov::Node>& node, const precisions_map& precisions);
+
     const ExecutionConfig& config;
     std::shared_ptr<RemoteContextImpl> m_context;
     cldnn::device_info device_info;
diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp
index f036afc0cd59ad..425e7fc6265ee0 100644
--- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp
+++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp
@@ -12,6 +12,7 @@
 #include <tuple>
 #include <vector>
 
+#include "openvino/opsets/opset10.hpp"
 #include "intel_gpu/plugin/transformations_pipeline.hpp"
 #include "intel_gpu/runtime/debug_configuration.hpp"
 #include "intel_gpu/runtime/itt.hpp"
@@ -281,6 +282,49 @@ extern bool query_microkernels_supported(cldnn::engine& e, const cldnn::ExecutionConfig& config);
 
 namespace ov::intel_gpu {
 
+bool TransformationsPipeline::fuse_type_to_convert(const std::shared_ptr<ov::Node>& node, const precisions_map& precisions) {
+    auto convert = ov::as_type_ptr<ov::opset10::Convert>(node);
+    if (!convert)
+        return false;
+    const auto& from = node->get_output_element_type(0);
+    auto it = precisions.find(from);
+    if (it == precisions.end())
+        return false;
+    const auto& to = it->second;
+
+    if (convert->get_convert_element_type() == ov::element::boolean && to.is_integral_number()) {
+        // For Convert node, converting precision from numerical data types to boolean will lead to mathematical
+        // error, because here the output precision boolean is replaced by u8:
+        //  - floating point value 0.01 is converted to be 1 for boolean, but 0 for u8 - need to insert Ceil.
+        //  - either float or int values should be clipped with the interval [0; 1] to mimic bool cast behavior, i.e.
+        //  0 - is false, 1 - is true
+        //  - to perform clamping correctly an Abs op should be inserted before Clamp
+        // Thus an Abs, Ceil and Clamp nodes should be added before the Convert node for this scenario.
+        ov::pass::NodeRegistry reg;
+        const auto& in_prec = convert->get_input_element_type(0);
+        auto parent_node = convert->input_value(0).get_node_shared_ptr();
+        auto item = precisions.find(in_prec);
+        if (item != precisions.end()) {
+            // Add convert node for unsupported precision, such as FP64 or INT64
+            parent_node = reg.make<ov::opset10::Convert>(parent_node, item->second);
+        }
+        if (in_prec.is_signed()) {
+            parent_node = reg.make<ov::opset10::Abs>(parent_node);
+        }
+        if (in_prec.is_real()) {
+            parent_node = reg.make<ov::opset10::Ceiling>(parent_node);
+        }
+        parent_node = reg.make<ov::opset10::Clamp>(parent_node, 0, 1);
+        const auto new_convert = reg.make<ov::opset10::Convert>(parent_node, to);
+        new_convert->set_friendly_name(convert->get_friendly_name());
+        ov::copy_runtime_info(convert, reg.get());
+        ov::replace_node(convert, new_convert);
+        return true;
+    }
+    convert->set_convert_element_type(to);
+    return true;
+}
+
 void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
     OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "TransformationsPipeline::apply");
     using const_node_ptr = const std::shared_ptr<const ov::Node>;
@@ -403,6 +447,7 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
         const bool keep_precision_sensitive_in_fp32_1 = true;
         const bool convert_input_output_precision = false;
         const bool store_original_precision_as_rt_attribute = true;
+
         manager.register_pass<ov::pass::ConvertPrecision>(fp_convert_precision_map,
                                                           empty_fuse_map,
                                                           keep_precision_sensitive_in_fp32_1,
@@ -514,8 +559,11 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
 
         manager.register_pass<ov::pass::Validate>();
         const bool keep_precision_sensitive_in_fp32_2 = true;
+
+        // To convert to f16 input to boolean which is converted to u8, add abs + ceiling + clamp before convert.
+        type_to_fuse_map type_to_fuse = {{ov::opset10::Convert::get_type_info_static(), fuse_type_to_convert}};
         manager.register_pass<ov::pass::ConvertPrecision>(int_convert_precision_map,
-                                                          empty_fuse_map,
+                                                          type_to_fuse,
                                                           keep_precision_sensitive_in_fp32_2,
                                                           convert_input_output_precision);
 
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/conversion.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/conversion.cpp
index 18e6ba1ce4f988..73c2eebca93ada 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/conversion.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/conversion.cpp
@@ -4,11 +4,76 @@
 
 #include <vector>
 
-#include "single_op_tests/conversion.hpp"
 #include "common_test_utils/test_constants.hpp"
+#include "common_test_utils/data_utils.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "conversion.hpp"
+
+namespace ov {
+namespace test {
+
+void ConvertToBooleanLayerTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {
+    inputs.clear();
+    const auto& funcInputs = function->inputs();
+
+    auto shape = targetInputStaticShapes.front();
+    auto size = shape_size(shape);
+    auto input_type = funcInputs[0].get_element_type();
+
+    ov::Tensor tensor = ov::Tensor(input_type, shape);
+    const auto first_part_size = size / 2;
+    const auto second_part_size = size - first_part_size;
+
+    // 1). Validate the nearest to zero values (Abs + Ceil)
+    {
+        double start_from = -2;
+        uint32_t range = 4;
+        int32_t resolution = size;
+        if (input_type == ov::element::f32) {
+            auto* rawBlobDataPtr = static_cast<float*>(tensor.data());
+            ov::test::utils::fill_data_random(rawBlobDataPtr, first_part_size, range, start_from, resolution);
+        } else if (input_type == ov::element::f16) {
+            auto* rawBlobDataPtr = static_cast<ov::float16*>(tensor.data());
+            ov::test::utils::fill_data_random(rawBlobDataPtr, first_part_size, range, start_from, resolution);
+        } else {
+            FAIL() << "Generating inputs with precision " << input_type.to_string() << " isn't supported, if output precision is boolean.";
+        }
+    }
+
+    // 2). Validate the values that are more than UINT8_MAX in absolute (Abs + Min)
+    {
+        ov::test::utils::InputGenerateData in_data_neg;
+        double neg_start_from = -1.5 * std::numeric_limits<uint8_t>::max();
+        double pos_start_from = 0.5 * std::numeric_limits<uint8_t>::max();
+        uint32_t range = 256;
+        auto neg_size = second_part_size / 2;
+        auto pos_size = second_part_size - neg_size;
+        int32_t resolution = 1;
+
+        if (input_type == ov::element::f32) {
+            auto* rawBlobDataPtr = static_cast<float*>(tensor.data());
+            ov::test::utils::fill_data_random(rawBlobDataPtr + first_part_size, neg_size, range, neg_start_from, resolution);
+            ov::test::utils::fill_data_random(rawBlobDataPtr + first_part_size + neg_size, pos_size, range, pos_start_from, resolution);
+        } else if (input_type == ov::element::f16) {
+            auto* rawBlobDataPtr = static_cast<ov::float16*>(tensor.data());
+            ov::test::utils::fill_data_random(rawBlobDataPtr + first_part_size, neg_size, range, neg_start_from, resolution);
+            ov::test::utils::fill_data_random(rawBlobDataPtr + first_part_size + neg_size, pos_size, range, pos_start_from, resolution);
+        } else {
+            FAIL() << "Generating inputs with precision " << input_type.to_string() << " isn't supported, if output precision is boolean.";
+        }
+    }
+
+    inputs.insert({funcInputs[0].get_node_shared_ptr(), tensor});
+}
+
+} // namespace test
+} // namespace ov
+
 
 namespace {
 using ov::test::ConversionLayerTest;
+using ov::test::ConvertToBooleanLayerTest;
+
 const std::vector<ov::test::utils::ConversionTypes> conversionOpTypes = {
     ov::test::utils::ConversionTypes::CONVERT,
     ov::test::utils::ConversionTypes::CONVERT_LIKE,
@@ -32,4 +97,22 @@ INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, ConversionLayerTest,
                                 ::testing::Values(ov::test::utils::DEVICE_GPU)),
                         ConversionLayerTest::getTestCaseName);
 
+TEST_P(ConvertToBooleanLayerTest, CompareWithRefs) {
+    run();
+};
+
+const std::vector<ov::element::Type> precisions_floating_point = {
+    ov::element::f32,
+    ov::element::f16
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_NoReshape, ConvertToBooleanLayerTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn({ov::test::utils::ConversionTypes::CONVERT}),
+                                ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShape)),
+                                ::testing::ValuesIn(precisions_floating_point),
+                                ::testing::Values(ov::element::boolean),
+                                ::testing::Values(ov::test::utils::DEVICE_GPU)),
+                        ConvertToBooleanLayerTest::getTestCaseName);
+
 } // namespace
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/conversion.hpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/conversion.hpp
new file mode 100644
index 00000000000000..f34278deaf27d4
--- /dev/null
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/conversion.hpp
@@ -0,0 +1,22 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "gtest/gtest.h"
+#include "single_op_tests/conversion.hpp"
+
+
+namespace ov {
+namespace test {
+
+class ConvertToBooleanLayerTest : public ConversionLayerTest {
+protected:
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override;
+};
+
+} // namespace test
+} // namespace ov